2 * linux/drivers/video/omap2/dss/dsi.c
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #define DSS_SUBSYS_NAME "DSI"
22 #include <linux/kernel.h>
24 #include <linux/clk.h>
25 #include <linux/device.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/workqueue.h>
30 #include <linux/mutex.h>
31 #include <linux/seq_file.h>
32 #include <linux/kfifo.h>
34 #include <mach/board.h>
35 #include <mach/display.h>
36 #include <mach/clock.h>
40 /*#define VERBOSE_IRQ*/
42 #define DSI_BASE 0x4804FC00
44 struct dsi_reg { u16 idx; };
46 #define DSI_REG(idx) ((const struct dsi_reg) { idx })
48 #define DSI_SZ_REGS SZ_1K
49 /* DSI Protocol Engine */
51 #define DSI_REVISION DSI_REG(0x0000)
52 #define DSI_SYSCONFIG DSI_REG(0x0010)
53 #define DSI_SYSSTATUS DSI_REG(0x0014)
54 #define DSI_IRQSTATUS DSI_REG(0x0018)
55 #define DSI_IRQENABLE DSI_REG(0x001C)
56 #define DSI_CTRL DSI_REG(0x0040)
57 #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
58 #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
59 #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
60 #define DSI_CLK_CTRL DSI_REG(0x0054)
61 #define DSI_TIMING1 DSI_REG(0x0058)
62 #define DSI_TIMING2 DSI_REG(0x005C)
63 #define DSI_VM_TIMING1 DSI_REG(0x0060)
64 #define DSI_VM_TIMING2 DSI_REG(0x0064)
65 #define DSI_VM_TIMING3 DSI_REG(0x0068)
66 #define DSI_CLK_TIMING DSI_REG(0x006C)
67 #define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
68 #define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
69 #define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
70 #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
71 #define DSI_VM_TIMING4 DSI_REG(0x0080)
72 #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
73 #define DSI_VM_TIMING5 DSI_REG(0x0088)
74 #define DSI_VM_TIMING6 DSI_REG(0x008C)
75 #define DSI_VM_TIMING7 DSI_REG(0x0090)
76 #define DSI_STOPCLK_TIMING DSI_REG(0x0094)
77 #define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
78 #define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
79 #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
80 #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
81 #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
82 #define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
83 #define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
87 #define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
88 #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
89 #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
90 #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
92 /* DSI_PLL_CTRL_SCP */
94 #define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
95 #define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
96 #define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
97 #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
98 #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
100 #define REG_GET(idx, start, end) \
101 FLD_GET(dsi_read_reg(idx), start, end)
103 #define REG_FLD_MOD(idx, val, start, end) \
104 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
106 /* Global interrupts */
107 #define DSI_IRQ_VC0 (1 << 0)
108 #define DSI_IRQ_VC1 (1 << 1)
109 #define DSI_IRQ_VC2 (1 << 2)
110 #define DSI_IRQ_VC3 (1 << 3)
111 #define DSI_IRQ_WAKEUP (1 << 4)
112 #define DSI_IRQ_RESYNC (1 << 5)
113 #define DSI_IRQ_PLL_LOCK (1 << 7)
114 #define DSI_IRQ_PLL_UNLOCK (1 << 8)
115 #define DSI_IRQ_PLL_RECALL (1 << 9)
116 #define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
117 #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
118 #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
119 #define DSI_IRQ_TE_TRIGGER (1 << 16)
120 #define DSI_IRQ_ACK_TRIGGER (1 << 17)
121 #define DSI_IRQ_SYNC_LOST (1 << 18)
122 #define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
123 #define DSI_IRQ_TA_TIMEOUT (1 << 20)
124 #define DSI_IRQ_ERROR_MASK \
125 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
127 #define DSI_IRQ_CHANNEL_MASK 0xf
129 /* Virtual channel interrupts */
130 #define DSI_VC_IRQ_CS (1 << 0)
131 #define DSI_VC_IRQ_ECC_CORR (1 << 1)
132 #define DSI_VC_IRQ_PACKET_SENT (1 << 2)
133 #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
134 #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
135 #define DSI_VC_IRQ_BTA (1 << 5)
136 #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
137 #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
138 #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
139 #define DSI_VC_IRQ_ERROR_MASK \
140 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
141 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
142 DSI_VC_IRQ_FIFO_TX_UDF)
144 /* ComplexIO interrupts */
145 #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
146 #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
147 #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
148 #define DSI_CIO_IRQ_ERRESC1 (1 << 5)
149 #define DSI_CIO_IRQ_ERRESC2 (1 << 6)
150 #define DSI_CIO_IRQ_ERRESC3 (1 << 7)
151 #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
152 #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
153 #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
154 #define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
155 #define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
156 #define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
157 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
158 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
159 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
160 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
161 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
162 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
163 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
164 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
166 #define DSI_DT_DCS_SHORT_WRITE_0 0x05
167 #define DSI_DT_DCS_SHORT_WRITE_1 0x15
168 #define DSI_DT_DCS_READ 0x06
169 #define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
170 #define DSI_DT_NULL_PACKET 0x09
171 #define DSI_DT_DCS_LONG_WRITE 0x39
173 #define DSI_DT_RX_ACK_WITH_ERR 0x02
174 #define DSI_DT_RX_DCS_LONG_READ 0x1c
175 #define DSI_DT_RX_SHORT_READ_1 0x21
176 #define DSI_DT_RX_SHORT_READ_2 0x22
178 #define FINT_MAX 2100000
179 #define FINT_MIN 750000
180 #define REGN_MAX (1 << 7)
181 #define REGM_MAX ((1 << 11) - 1)
182 #define REGM3_MAX (1 << 4)
183 #define REGM4_MAX (1 << 4)
187 DSI_FIFO_SIZE_32 = 1,
188 DSI_FIFO_SIZE_64 = 2,
189 DSI_FIFO_SIZE_96 = 3,
190 DSI_FIFO_SIZE_128 = 4,
193 #define DSI_CMD_FIFO_LEN 16
195 struct dsi_cmd_update {
203 struct dsi_cmd_mem_read {
211 struct completion *completion;
214 struct dsi_cmd_test {
217 struct completion *completion;
227 DSI_CMD_SET_UPDATE_MODE,
232 struct dsi_cmd_item {
233 struct omap_display *display;
238 struct dsi_cmd_update r;
239 struct completion *sync;
240 struct dsi_cmd_mem_read mem_read;
241 struct dsi_cmd_test test;
243 enum omap_dss_update_mode update_mode;
253 unsigned long dsi1_pll_fclk; /* Hz */
254 unsigned long dsi2_pll_fclk; /* Hz */
255 unsigned long dsiphy; /* Hz */
256 unsigned long ddr_clk; /* Hz */
259 struct omap_display *display;
260 enum fifo_size fifo_size;
261 int dest_per; /* destination peripheral 0-3 */
268 struct completion bta_completion;
270 struct work_struct framedone_work;
271 struct work_struct process_work;
272 struct workqueue_struct *workqueue;
274 enum omap_dss_update_mode user_update_mode;
275 enum omap_dss_update_mode target_update_mode;
276 enum omap_dss_update_mode update_mode;
278 int framedone_scheduled; /* helps to catch strange framedone bugs */
280 unsigned long cache_req_pck;
281 unsigned long cache_clk_freq;
282 struct dsi_clock_info cache_cinfo;
284 struct kfifo *cmd_fifo;
286 struct completion cmd_done;
287 atomic_t cmd_fifo_full;
288 atomic_t cmd_pending;
290 bool autoupdate_setup;
293 ktime_t perf_setup_time;
294 ktime_t perf_start_time;
295 int perf_measure_frames;
309 static unsigned int dsi_perf;
310 module_param_named(dsi_perf, dsi_perf, bool, 0644);
313 static void dsi_process_cmd_fifo(struct work_struct *work);
314 static void dsi_push_update(struct omap_display *display,
315 int x, int y, int w, int h);
316 static void dsi_push_autoupdate(struct omap_display *display);
318 static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
320 __raw_writel(val, dsi.base + idx.idx);
323 static inline u32 dsi_read_reg(const struct dsi_reg idx)
325 return __raw_readl(dsi.base + idx.idx);
329 void dsi_save_context(void)
333 void dsi_restore_context(void)
337 static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
342 while (REG_GET(idx, bitnum, bitnum) != value) {
351 static void perf_mark_setup(void)
353 dsi.perf_setup_time = ktime_get();
356 static void perf_mark_start(void)
358 dsi.perf_start_time = ktime_get();
361 static void perf_show(const char *name)
363 ktime_t t, setup_time, trans_time;
365 u32 setup_us, trans_us, total_us;
366 const int numframes = 100;
367 static u32 s_trans_us, s_min_us = 0xffffffff, s_max_us;
372 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
377 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
378 setup_us = (u32)ktime_to_us(setup_time);
382 trans_time = ktime_sub(t, dsi.perf_start_time);
383 trans_us = (u32)ktime_to_us(trans_time);
387 total_us = setup_us + trans_us;
389 total_bytes = dsi.update_region.w *
390 dsi.update_region.h *
391 dsi.update_region.bytespp;
393 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
394 dsi.perf_measure_frames++;
396 if (trans_us < s_min_us)
399 if (trans_us > s_max_us)
402 s_trans_us += trans_us;
404 if (dsi.perf_measure_frames < numframes)
407 DSSINFO("%s update: %d frames in %u us "
408 "(min/max/avg %u/%u/%u), %u fps\n",
413 s_trans_us / numframes,
414 1000*1000 / (s_trans_us / numframes));
416 dsi.perf_measure_frames = 0;
418 s_min_us = 0xffffffff;
421 DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, "
427 1000*1000 / total_us,
429 total_bytes * 1000 / total_us);
433 #define perf_mark_setup()
434 #define perf_mark_start()
438 static void print_irq_status(u32 status)
441 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
444 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
447 if (status & DSI_IRQ_##x) \
473 static void print_irq_status_vc(int channel, u32 status)
476 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
479 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
482 if (status & DSI_VC_IRQ_##x) \
499 static void print_irq_status_cio(u32 status)
501 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
504 if (status & DSI_CIO_IRQ_##x) \
518 PIS(ERRCONTENTIONLP0_1);
519 PIS(ERRCONTENTIONLP1_1);
520 PIS(ERRCONTENTIONLP0_2);
521 PIS(ERRCONTENTIONLP1_2);
522 PIS(ERRCONTENTIONLP0_3);
523 PIS(ERRCONTENTIONLP1_3);
524 PIS(ULPSACTIVENOT_ALL0);
525 PIS(ULPSACTIVENOT_ALL1);
531 static int debug_irq;
533 /* called from dss */
534 void dsi_irq_handler(void)
536 u32 irqstatus, vcstatus, ciostatus;
539 irqstatus = dsi_read_reg(DSI_IRQSTATUS);
541 if (irqstatus & DSI_IRQ_ERROR_MASK) {
542 DSSERR("DSI error, irqstatus %x\n", irqstatus);
543 print_irq_status(irqstatus);
544 } else if (debug_irq) {
545 print_irq_status(irqstatus);
548 for (i = 0; i < 4; ++i) {
549 if ((irqstatus & (1<<i)) == 0)
552 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
554 if (vcstatus & DSI_VC_IRQ_BTA)
555 complete(&dsi.bta_completion);
557 if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
558 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
560 print_irq_status_vc(i, vcstatus);
561 } else if (debug_irq) {
562 print_irq_status_vc(i, vcstatus);
565 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
568 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
569 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
571 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
573 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
574 print_irq_status_cio(ciostatus);
577 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
581 static void _dsi_initialize_irq(void)
586 /* disable all interrupts */
587 dsi_write_reg(DSI_IRQENABLE, 0);
588 for (i = 0; i < 4; ++i)
589 dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
590 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
592 /* clear interrupt status */
593 l = dsi_read_reg(DSI_IRQSTATUS);
594 dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
596 for (i = 0; i < 4; ++i) {
597 l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
598 dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
601 l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
602 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
604 /* enable error irqs */
605 l = DSI_IRQ_ERROR_MASK;
606 dsi_write_reg(DSI_IRQENABLE, l);
608 l = DSI_VC_IRQ_ERROR_MASK;
609 for (i = 0; i < 4; ++i)
610 dsi_write_reg(DSI_VC_IRQENABLE(i), l);
612 /* XXX zonda responds incorrectly, causing control error:
613 Exit from LP-ESC mode to LP11 uses wrong transition states on the
614 data lines LP0 and LN0. */
615 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
616 -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
619 static void dsi_vc_enable_bta_irq(int channel)
623 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
625 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
628 static void dsi_vc_disable_bta_irq(int channel)
632 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
633 l &= ~DSI_VC_IRQ_BTA;
634 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
637 /* DSI func clock. this could also be DSI2_PLL_FCLK */
638 static inline void enable_clocks(bool enable)
641 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
643 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
646 /* source clock for DSI PLL. this could also be PCLKFREE */
647 static inline void dsi_enable_pll_clock(bool enable)
650 dss_clk_enable(DSS_CLK_FCK2);
652 dss_clk_disable(DSS_CLK_FCK2);
654 if (enable && dsi.pll_locked) {
655 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
656 DSSERR("cannot lock PLL when enabling clocks\n");
661 static void _dsi_print_reset_status(void)
668 /* A dummy read using the SCP interface to any DSIPHY register is
669 * required after DSIPHY reset to complete the reset of the DSI complex
671 l = dsi_read_reg(DSI_DSIPHY_CFG5);
673 printk(KERN_DEBUG "DSI resets: ");
675 l = dsi_read_reg(DSI_PLL_STATUS);
676 printk("PLL (%d) ", FLD_GET(l, 0, 0));
678 l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
679 printk("CIO (%d) ", FLD_GET(l, 29, 29));
681 l = dsi_read_reg(DSI_DSIPHY_CFG5);
682 printk("PHY (%x, %d, %d, %d)\n",
689 #define _dsi_print_reset_status()
692 static inline int dsi_if_enable(bool enable)
694 DSSDBG("dsi_if_enable(%d)\n", enable);
696 enable = enable ? 1 : 0;
697 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
699 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
700 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
707 static unsigned long dsi_fclk_rate(void)
711 if (dss_get_dsi_clk_source() == 0) {
712 /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
713 r = dss_clk_get_rate(DSS_CLK_FCK1);
715 /* DSI FCLK source is DSI2_PLL_FCLK */
716 r = dsi.dsi2_pll_fclk;
722 static int dsi_set_lp_clk_divisor(void)
725 unsigned long dsi_fclk;
728 /* LP_CLK_DIVISOR, DSI fclk/n, should be 20MHz - 32kHz */
730 dsi_fclk = dsi_fclk_rate();
732 for (n = 1; n < (1 << 13) - 1; ++n) {
734 if (mhz <= 20*1000*1000)
738 if (n == (1 << 13) - 1) {
739 DSSERR("Failed to find LP_CLK_DIVISOR\n");
743 DSSDBG("LP_CLK_DIV %d, LP_CLK %ld\n", n, mhz);
745 REG_FLD_MOD(DSI_CLK_CTRL, n, 12, 0); /* LP_CLK_DIVISOR */
746 if (dsi_fclk > 30*1000*1000)
747 REG_FLD_MOD(DSI_CLK_CTRL, 1, 21, 21); /* LP_RX_SYNCHRO_ENABLE */
/* DSI PLL power states, as written to DSI_CLK_CTRL PLL_PWR_CMD (bits 31:30) */
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};
760 static int dsi_pll_power(enum dsi_pll_power_state state)
764 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
767 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
770 DSSERR("Failed to set DSI PLL power mode to %d\n",
779 int dsi_pll_calc_pck(bool is_tft, unsigned long req_pck,
780 struct dsi_clock_info *cinfo)
782 struct dsi_clock_info cur, best;
786 if (req_pck == dsi.cache_req_pck &&
787 dsi.cache_cinfo.clkin == dss_clk_get_rate(DSS_CLK_FCK2)) {
788 DSSDBG("DSI clock info found from cache\n");
789 *cinfo = dsi.cache_cinfo;
793 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
795 if (min_fck_per_pck &&
796 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
797 DSSERR("Requested pixel clock not possible with the current "
798 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
799 "the constraint off.\n");
803 DSSDBG("dsi_pll_calc\n");
806 memset(&best, 0, sizeof(best));
808 memset(&cur, 0, sizeof(cur));
809 cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2);
810 cur.use_dss2_fck = 1;
813 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
814 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
815 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
816 for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
817 if (cur.highfreq == 0)
818 cur.fint = cur.clkin / cur.regn;
820 cur.fint = cur.clkin / (2 * cur.regn);
822 if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
825 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
826 for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
829 a = 2 * cur.regm * (cur.clkin/1000);
830 b = cur.regn * (cur.highfreq + 1);
831 cur.dsiphy = a / b * 1000;
833 if (cur.dsiphy > 1800 * 1000 * 1000)
836 /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
837 for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
839 cur.dsi1_pll_fclk = cur.dsiphy / cur.regm3;
841 /* this will narrow down the search a bit,
842 * but still give pixclocks below what was
844 if (cur.dsi1_pll_fclk < req_pck)
847 if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
850 if (min_fck_per_pck &&
852 req_pck * min_fck_per_pck)
857 find_lck_pck_divs(is_tft, req_pck,
862 cur.lck = cur.dsi1_pll_fclk / cur.lck_div;
863 cur.pck = cur.lck / cur.pck_div;
865 if (abs(cur.pck - req_pck) <
866 abs(best.pck - req_pck)) {
869 if (cur.pck == req_pck)
877 if (min_fck_per_pck) {
878 DSSERR("Could not find suitable clock settings.\n"
879 "Turning FCK/PCK constraint off and"
885 DSSERR("Could not find suitable clock settings.\n");
890 /* DSI2_PLL_FCLK (regm4) is not used. Set it to something sane. */
891 best.regm4 = best.dsiphy / 48000000;
892 if (best.regm4 > REGM4_MAX)
893 best.regm4 = REGM4_MAX;
894 else if (best.regm4 == 0)
896 best.dsi2_pll_fclk = best.dsiphy / best.regm4;
901 dsi.cache_req_pck = req_pck;
902 dsi.cache_clk_freq = 0;
903 dsi.cache_cinfo = best;
908 static int dsi_pll_calc_ddrfreq(unsigned long clk_freq,
909 struct dsi_clock_info *cinfo)
911 struct dsi_clock_info cur, best;
912 const bool use_dss2_fck = 1;
913 unsigned long datafreq;
915 DSSDBG("dsi_pll_calc_ddrfreq\n");
917 if (clk_freq == dsi.cache_clk_freq &&
918 dsi.cache_cinfo.clkin == dss_clk_get_rate(DSS_CLK_FCK2)) {
919 DSSDBG("DSI clock info found from cache\n");
920 *cinfo = dsi.cache_cinfo;
924 datafreq = clk_freq * 4;
926 memset(&best, 0, sizeof(best));
928 memset(&cur, 0, sizeof(cur));
929 cur.use_dss2_fck = use_dss2_fck;
931 cur.clkin = dss_clk_get_rate(DSS_CLK_FCK2);
934 cur.clkin = dispc_pclk_rate();
935 if (cur.clkin < 32000000)
941 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
942 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
943 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
944 for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
945 if (cur.highfreq == 0)
946 cur.fint = cur.clkin / cur.regn;
948 cur.fint = cur.clkin / (2 * cur.regn);
950 if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
953 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
954 for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
957 a = 2 * cur.regm * (cur.clkin/1000);
958 b = cur.regn * (cur.highfreq + 1);
959 cur.dsiphy = a / b * 1000;
961 if (cur.dsiphy > 1800 * 1000 * 1000)
964 if (abs(cur.dsiphy - datafreq) <
965 abs(best.dsiphy - datafreq)) {
967 /* DSSDBG("best %ld\n", best.dsiphy); */
970 if (cur.dsiphy == datafreq)
975 /* DSI1_PLL_FCLK (regm3) is not used. Set it to something sane. */
976 best.regm3 = best.dsiphy / 48000000;
977 if (best.regm3 > REGM3_MAX)
978 best.regm3 = REGM3_MAX;
979 else if (best.regm3 == 0)
981 best.dsi1_pll_fclk = best.dsiphy / best.regm3;
983 /* DSI2_PLL_FCLK (regm4) is not used. Set it to something sane. */
984 best.regm4 = best.dsiphy / 48000000;
985 if (best.regm4 > REGM4_MAX)
986 best.regm4 = REGM4_MAX;
987 else if (best.regm4 == 0)
989 best.dsi2_pll_fclk = best.dsiphy / best.regm4;
994 dsi.cache_clk_freq = clk_freq;
995 dsi.cache_req_pck = 0;
996 dsi.cache_cinfo = best;
1001 int dsi_pll_program(struct dsi_clock_info *cinfo)
1006 DSSDBG("dsi_pll_program\n");
1008 dsi.dsiphy = cinfo->dsiphy;
1009 dsi.ddr_clk = dsi.dsiphy / 4;
1010 dsi.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
1011 dsi.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
1013 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1015 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1016 cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
1020 /* DSIPHY == CLKIN4DDR */
1021 DSSDBG("DSIPHY = 2 * %d / %d * %lu / %d = %lu\n",
1025 cinfo->highfreq + 1,
1028 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1029 dsi.dsiphy / 1000 / 1000 / 2);
1031 DSSDBG("Clock lane freq %ld Hz\n", dsi.ddr_clk);
1033 DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
1034 cinfo->regm3, cinfo->dsi1_pll_fclk);
1035 DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
1036 cinfo->regm4, cinfo->dsi2_pll_fclk);
1038 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
1040 l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
1041 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1042 l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
1043 l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
1044 l = FLD_MOD(l, cinfo->regm3 - 1, 22, 19); /* DSI_CLOCK_DIV */
1045 l = FLD_MOD(l, cinfo->regm4 - 1, 26, 23); /* DSIPROTO_CLOCK_DIV */
1046 dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
1048 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1049 l = FLD_MOD(l, 7, 4, 1); /* DSI_PLL_FREQSEL */
1050 /* DSI_PLL_CLKSEL */
1051 l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1, 11, 11);
1052 l = FLD_MOD(l, cinfo->highfreq, 12, 12); /* DSI_PLL_HIGHFREQ */
1053 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1054 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1055 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1056 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1058 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1060 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
1061 DSSERR("dsi pll go bit not going down.\n");
1066 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
1067 DSSERR("cannot lock PLL\n");
1074 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1075 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1076 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1077 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1078 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1079 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1080 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1081 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1082 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1083 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1084 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1085 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1086 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1087 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1088 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1089 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1091 DSSDBG("PLL config done\n");
1096 int dsi_pll_init(bool enable_hsclk, bool enable_hsdiv)
1099 enum dsi_pll_power_state pwstate;
1100 struct dispc_clock_info cinfo;
1102 DSSDBG("PLL init\n");
1105 dsi_enable_pll_clock(1);
1107 /* XXX this should be calculated depending on the screen size,
1108 * required framerate and DSI speed.
1109 * For now 48MHz is enough for 864x480@60 with 360Mbps/lane
1111 r = dispc_calc_clock_div(1, 48 * 1000 * 1000, &cinfo);
1115 r = dispc_set_clock_div(&cinfo);
1117 DSSERR("Failed to set basic clocks\n");
1121 r = dss_dsi_power_up();
1125 /* XXX PLL does not come out of reset without this... */
1126 dispc_pck_free_enable(1);
1128 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
1129 DSSERR("PLL not coming out of reset.\n");
1134 /* XXX ... but if left on, we get problems when planes do not
1135 * fill the whole display. No idea about this */
1136 dispc_pck_free_enable(0);
1138 if (enable_hsclk && enable_hsdiv)
1139 pwstate = DSI_PLL_POWER_ON_ALL;
1140 else if (enable_hsclk)
1141 pwstate = DSI_PLL_POWER_ON_HSCLK;
1142 else if (enable_hsdiv)
1143 pwstate = DSI_PLL_POWER_ON_DIV;
1145 pwstate = DSI_PLL_POWER_OFF;
1147 r = dsi_pll_power(pwstate);
1152 DSSDBG("PLL init done\n");
1156 dss_dsi_power_down();
1159 dsi_enable_pll_clock(0);
1163 void dsi_pll_uninit(void)
1166 dsi_enable_pll_clock(0);
1169 dsi_pll_power(DSI_PLL_POWER_OFF);
1170 dss_dsi_power_down();
1171 DSSDBG("PLL uninit done\n");
1174 unsigned long dsi_get_dsi1_pll_rate(void)
1176 return dsi.dsi1_pll_fclk;
1179 unsigned long dsi_get_dsi2_pll_rate(void)
1181 return dsi.dsi2_pll_fclk;
1184 void dsi_dump_clocks(struct seq_file *s)
1190 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
1192 seq_printf(s, "- dsi -\n");
1194 seq_printf(s, "dsi fclk source = %s\n",
1195 dss_get_dsi_clk_source() == 0 ?
1196 "dss1_alwon_fclk" : "dsi2_pll_fclk");
1198 seq_printf(s, "dsi pll source = %s\n",
1200 "dss2_alwon_fclk" : "pclkfree");
1202 seq_printf(s, "DSIPHY\t\t%lu\nDDR_CLK\t\t%lu\n",
1203 dsi.dsiphy, dsi.ddr_clk);
1205 seq_printf(s, "dsi1_pll_fck\t%lu (%s)\n"
1206 "dsi2_pll_fck\t%lu (%s)\n",
1208 dss_get_dispc_clk_source() == 0 ? "off" : "on",
1210 dss_get_dsi_clk_source() == 0 ? "off" : "on");
1215 void dsi_dump_regs(struct seq_file *s)
1217 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
1219 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1221 DUMPREG(DSI_REVISION);
1222 DUMPREG(DSI_SYSCONFIG);
1223 DUMPREG(DSI_SYSSTATUS);
1224 DUMPREG(DSI_IRQSTATUS);
1225 DUMPREG(DSI_IRQENABLE);
1227 DUMPREG(DSI_COMPLEXIO_CFG1);
1228 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1229 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1230 DUMPREG(DSI_CLK_CTRL);
1231 DUMPREG(DSI_TIMING1);
1232 DUMPREG(DSI_TIMING2);
1233 DUMPREG(DSI_VM_TIMING1);
1234 DUMPREG(DSI_VM_TIMING2);
1235 DUMPREG(DSI_VM_TIMING3);
1236 DUMPREG(DSI_CLK_TIMING);
1237 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1238 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1239 DUMPREG(DSI_COMPLEXIO_CFG2);
1240 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1241 DUMPREG(DSI_VM_TIMING4);
1242 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1243 DUMPREG(DSI_VM_TIMING5);
1244 DUMPREG(DSI_VM_TIMING6);
1245 DUMPREG(DSI_VM_TIMING7);
1246 DUMPREG(DSI_STOPCLK_TIMING);
1248 DUMPREG(DSI_VC_CTRL(0));
1249 DUMPREG(DSI_VC_TE(0));
1250 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1251 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1252 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1253 DUMPREG(DSI_VC_IRQSTATUS(0));
1254 DUMPREG(DSI_VC_IRQENABLE(0));
1256 DUMPREG(DSI_VC_CTRL(1));
1257 DUMPREG(DSI_VC_TE(1));
1258 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1259 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1260 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1261 DUMPREG(DSI_VC_IRQSTATUS(1));
1262 DUMPREG(DSI_VC_IRQENABLE(1));
1264 DUMPREG(DSI_VC_CTRL(2));
1265 DUMPREG(DSI_VC_TE(2));
1266 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1267 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1268 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1269 DUMPREG(DSI_VC_IRQSTATUS(2));
1270 DUMPREG(DSI_VC_IRQENABLE(2));
1272 DUMPREG(DSI_VC_CTRL(3));
1273 DUMPREG(DSI_VC_TE(3));
1274 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1275 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1276 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1277 DUMPREG(DSI_VC_IRQSTATUS(3));
1278 DUMPREG(DSI_VC_IRQENABLE(3));
1280 DUMPREG(DSI_DSIPHY_CFG0);
1281 DUMPREG(DSI_DSIPHY_CFG1);
1282 DUMPREG(DSI_DSIPHY_CFG2);
1283 DUMPREG(DSI_DSIPHY_CFG5);
1285 DUMPREG(DSI_PLL_CONTROL);
1286 DUMPREG(DSI_PLL_STATUS);
1287 DUMPREG(DSI_PLL_GO);
1288 DUMPREG(DSI_PLL_CONFIGURATION1);
1289 DUMPREG(DSI_PLL_CONFIGURATION2);
1291 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
/* ComplexIO power states, as written to DSI_COMPLEXIO_CFG1 PWR_CMD */
enum dsi_complexio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};
1301 static int dsi_complexio_power(enum dsi_complexio_power_state state)
1306 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
1309 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
1312 DSSERR("failed to set complexio power state to "
1321 static void dsi_complexio_config(struct omap_display *display)
1325 int clk_lane = display->hw_config.u.dsi.clk_lane;
1326 int data1_lane = display->hw_config.u.dsi.data1_lane;
1327 int data2_lane = display->hw_config.u.dsi.data2_lane;
1328 int clk_pol = display->hw_config.u.dsi.clk_pol;
1329 int data1_pol = display->hw_config.u.dsi.data1_pol;
1330 int data2_pol = display->hw_config.u.dsi.data2_pol;
1332 r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
1333 r = FLD_MOD(r, clk_lane, 2, 0);
1334 r = FLD_MOD(r, clk_pol, 3, 3);
1335 r = FLD_MOD(r, data1_lane, 6, 4);
1336 r = FLD_MOD(r, data1_pol, 7, 7);
1337 r = FLD_MOD(r, data2_lane, 10, 8);
1338 r = FLD_MOD(r, data2_pol, 11, 11);
1339 dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
1341 /* The configuration of the DSI complex I/O (number of data lanes,
1342 position, differential order) should not be changed while
1343 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
1344 the hardware to take into account a new configuration of the complex
1345 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
1346 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
1347 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
1348 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
1349 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
1350 DSI complex I/O configuration is unknown. */
1353 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1354 REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
1355 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
1356 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1360 static inline unsigned ns2ddr(unsigned ns)
1362 /* convert time in ns to ddr ticks, rounding up */
1363 return (ns * (dsi.ddr_clk/1000/1000) + 999) / 1000;
1366 static inline unsigned ddr2ns(unsigned ddr)
1368 return ddr * 1000 * 1000 / (dsi.ddr_clk / 1000);
/* Compute the D-PHY lane timing parameters (in DDR ticks) from their
 * min/max constraints in ns and UI, then program them into
 * DSI_DSIPHY_CFG0..2. The "+N" terms add the UI-denominated part of each
 * constraint (1 DDR tick = 2 UI, per the comment at orig. 1380).
 * NOTE(review): the declarations of r and tclk_prepare (and the braces)
 * were dropped by the extraction; the u32 list below is what survives. */
1371 static void dsi_complexio_timings(void)
1374 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
1375 u32 tlpx_half, tclk_trail, tclk_zero;
1378 /* calculate timings */
1380 /* 1 * DDR_CLK = 2 * UI */
1382 /* min 40ns + 4*UI max 85ns + 6*UI */
1383 ths_prepare = ns2ddr(59) + 2;
1385 /* min 145ns + 10*UI */
1386 ths_prepare_ths_zero = ns2ddr(145) + 5;
1388 /* min max(8*UI, 60ns+4*UI) */
1389 ths_trail = max((unsigned)4, ns2ddr(60) + 2);
1392 ths_exit = ns2ddr(100);
1395 tlpx_half = ns2ddr(25);
1398 tclk_trail = ns2ddr(60);
1400 /* min 38ns, max 95ns */
1401 tclk_prepare = ns2ddr(38);
1403 /* min tclk-prepare + tclk-zero = 300ns */
1404 tclk_zero = ns2ddr(300 - 38);
/* Debug dump of the computed values, converted back to ns for sanity. */
1406 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1407 ths_prepare, ddr2ns(ths_prepare),
1408 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
1409 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1410 ths_trail, ddr2ns(ths_trail),
1411 ths_exit, ddr2ns(ths_exit));
1413 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1414 "tclk_zero %u (%uns)\n",
1415 tlpx_half, ddr2ns(tlpx_half),
1416 tclk_trail, ddr2ns(tclk_trail),
1417 tclk_zero, ddr2ns(tclk_zero));
1418 DSSDBG("tclk_prepare %u (%uns)\n",
1419 tclk_prepare, ddr2ns(tclk_prepare));
1421 /* program timings */
1423 r = dsi_read_reg(DSI_DSIPHY_CFG0);
1424 r = FLD_MOD(r, ths_prepare, 31, 24);
1425 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1426 r = FLD_MOD(r, ths_trail, 15, 8);
1427 r = FLD_MOD(r, ths_exit, 7, 0);
1428 dsi_write_reg(DSI_DSIPHY_CFG0, r);
1430 r = dsi_read_reg(DSI_DSIPHY_CFG1);
1431 r = FLD_MOD(r, tlpx_half, 22, 16);
1432 r = FLD_MOD(r, tclk_trail, 15, 8);
1433 r = FLD_MOD(r, tclk_zero, 7, 0);
1434 dsi_write_reg(DSI_DSIPHY_CFG1, r);
1436 r = dsi_read_reg(DSI_DSIPHY_CFG2);
1437 r = FLD_MOD(r, tclk_prepare, 7, 0);
1438 dsi_write_reg(DSI_DSIPHY_CFG2, r);
/* Bring the DSI complex I/O (lane module) out of reset, power it on,
 * program lane timings and enable the LP clock. Returns 0 on success,
 * negative error otherwise (error-path lines were dropped by the
 * extraction). */
1442 static int dsi_complexio_init(struct omap_display *display)
1446 DSSDBG("dsi_complexio_init\n");
1448 /* CIO_CLK_ICG, enable L3 clk to CIO */
1449 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
1451 /* A dummy read using the SCP interface to any DSIPHY register is
1452 * required after DSIPHY reset to complete the reset of the DSI complex
1454 dsi_read_reg(DSI_DSIPHY_CFG5);
/* Bit 30 of DSIPHY_CFG5 signals reset completion of the PHY. */
1456 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
1457 DSSERR("ComplexIO PHY not coming out of reset.\n");
1462 dsi_complexio_config(display);
1464 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
1469 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1470 DSSERR("ComplexIO not coming out of reset.\n");
1475 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
1476 DSSERR("ComplexIO LDO power down.\n");
1481 dsi_complexio_timings();
/* The paragraph below is the TRM-mandated IF_EN/LP_CLK_ENABLE toggle
 * sequence; its surrounding comment delimiters were lost in extraction. */
1484 The configuration of the DSI complex I/O (number of data lanes,
1485 position, differential order) should not be changed while
1486 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1487 hardware to recognize a new configuration of the complex I/O (done
1488 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1489 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1490 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1491 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
1492 bit to 1. If the sequence is not followed, the DSi complex I/O
1493 configuration is undetermined.
1497 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1501 DSSDBG("CIO init done\n");
/* Power the complex I/O back off; counterpart of dsi_complexio_init(). */
1506 static void dsi_complexio_uninit(void)
1508 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
/* Poll DSI_SYSSTATUS bit 0 (RESETDONE) until the module reports reset
 * complete. The timeout bookkeeping and return statements were dropped
 * by the extraction; only the loop head and error print remain. */
1511 static int _dsi_wait_reset(void)
1515 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
1517 DSSERR("soft reset failed\n");
/* Trigger a DSI module soft reset (SYSCONFIG SOFTRESET bit) and wait
 * for it to complete. */
1526 static int _dsi_reset(void)
1529 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
1530 return _dsi_wait_reset();
/* Partition the TX FIFO between the four virtual channels. Each size is
 * in units of 1/4 of the FIFO (total budget 4); 'add' accumulates the
 * start offset of each VC's slice. The add-advance and error-return
 * statements were dropped by the extraction. */
1534 static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1535 enum fifo_size size3, enum fifo_size size4)
1541 dsi.vc[0].fifo_size = size1;
1542 dsi.vc[1].fifo_size = size2;
1543 dsi.vc[2].fifo_size = size3;
1544 dsi.vc[3].fifo_size = size4;
1546 for (i = 0; i < 4; i++) {
1548 int size = dsi.vc[i].fifo_size;
1550 if (add + size > 4) {
1551 DSSERR("Illegal FIFO configuration\n");
1555 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1557 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
1561 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
/* RX-side twin of dsi_config_tx_fifo(): partition the RX FIFO between
 * the four virtual channels and write DSI_RX_FIFO_VC_SIZE.
 * NOTE(review): like the TX variant, this overwrites dsi.vc[i].fifo_size
 * — the struct apparently holds only one shared size field. */
1564 static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
1565 enum fifo_size size3, enum fifo_size size4)
1571 dsi.vc[0].fifo_size = size1;
1572 dsi.vc[1].fifo_size = size2;
1573 dsi.vc[2].fifo_size = size3;
1574 dsi.vc[3].fifo_size = size4;
1576 for (i = 0; i < 4; i++) {
1578 int size = dsi.vc[i].fifo_size;
1580 if (add + size > 4) {
1581 DSSERR("Illegal FIFO configuration\n");
1585 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1587 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
1591 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
/* Force the data lanes into stop mode by setting FORCE_TX_STOP_MODE_IO,
 * then wait for the hardware to auto-clear the bit. */
1594 static int dsi_force_tx_stop_mode_io(void)
1598 r = dsi_read_reg(DSI_TIMING1);
1599 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
1600 dsi_write_reg(DSI_TIMING1, r);
1602 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
1603 DSSERR("TX_STOP bit not going down\n");
/* Debug helper: dump key status bits of one virtual channel's CTRL
 * register plus its TX FIFO emptiness byte. */
1610 static void dsi_vc_print_status(int channel)
1614 r = dsi_read_reg(DSI_VC_CTRL(channel));
1615 DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
1616 "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
1622 FLD_GET(r, 20, 20));
/* One byte per VC in the emptiness register; shift to this channel. */
1624 r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
1625 DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
/* Configure a virtual channel for CPU (L4) command-mode use: source is
 * the L4 port, checksum/ECC TX enabled, low-power speed, no DMA. */
1628 static void dsi_vc_config(int channel)
1632 DSSDBG("dsi_vc_config %d\n", channel);
1634 r = dsi_read_reg(DSI_VC_CTRL(channel));
1636 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
1637 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
1638 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
1639 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
1640 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
1641 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
1642 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
1644 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
1645 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
1647 dsi_write_reg(DSI_VC_CTRL(channel), r);
/* Configure a virtual channel for the video port: differs from
 * dsi_vc_config() only in SOURCE=1 (video port) and MODE_SPEED=1
 * (high speed). */
1650 static void dsi_vc_config_vp(int channel)
1654 DSSDBG("dsi_vc_config_vp\n");
1656 r = dsi_read_reg(DSI_VC_CTRL(channel));
1658 r = FLD_MOD(r, 1, 1, 1); /* SOURCE, 1 = video port */
1659 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
1660 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
1661 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
1662 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
1663 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
1664 r = FLD_MOD(r, 1, 9, 9); /* MODE_SPEED, high speed on/off */
1666 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
1667 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
1669 dsi_write_reg(DSI_VC_CTRL(channel), r);
/* Enable/disable a virtual channel (VC_EN, bit 0) and wait for the
 * hardware to reflect the new state. */
1673 static int dsi_vc_enable(int channel, bool enable)
1675 DSSDBG("dsi_vc_enable channel %d, enable %d\n", channel, enable);
/* Normalize bool to 0/1 so it can be compared with the register bit. */
1677 enable = enable ? 1 : 0;
1679 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
1681 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
1682 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
/* Switch a VC between high-speed and low-power command transmission.
 * The channel must be disabled while MODE_SPEED (bit 9) is changed;
 * afterwards force the lanes back to stop mode. */
1689 static void dsi_vc_enable_hs(int channel, bool enable)
1691 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
1693 dsi_vc_enable(channel, 0);
1696 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
1698 dsi_vc_enable(channel, 1);
1701 dsi_force_tx_stop_mode_io();
/* Drain the remaining payload words of a long RX packet, printing each
 * 32-bit word as four bytes. Loops while RX_FIFO_NOT_EMPTY (bit 20). */
1704 static void dsi_vc_flush_long_data(int channel)
1706 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1708 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1709 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
1713 (val >> 24) & 0xff);
/* Decode a DSI "Acknowledge with Error" report: each set bit in the
 * 16-bit error field maps to one error message.
 * NOTE(review): the extraction dropped the `if (err & (1 << N))` guard
 * lines for bits 0-9; only bits 10-15 retain their guards below. The
 * DSSERR strings for bits 0-9 still line up with their bit positions. */
1717 static void dsi_show_rx_ack_with_err(u16 err)
1719 DSSERR("\tACK with ERROR (%#x):\n", err);
1721 DSSERR("\t\tSoT Error\n");
1723 DSSERR("\t\tSoT Sync Error\n");
1725 DSSERR("\t\tEoT Sync Error\n");
1727 DSSERR("\t\tEscape Mode Entry Command Error\n");
1729 DSSERR("\t\tLP Transmit Sync Error\n");
1731 DSSERR("\t\tHS Receive Timeout Error\n");
1733 DSSERR("\t\tFalse Control Error\n");
1735 DSSERR("\t\t(reserved7)\n");
1737 DSSERR("\t\tECC Error, single-bit (corrected)\n");
1739 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
1740 if (err & (1 << 10))
1741 DSSERR("\t\tChecksum Error\n");
1742 if (err & (1 << 11))
1743 DSSERR("\t\tData type not recognized\n");
1744 if (err & (1 << 12))
1745 DSSERR("\t\tInvalid VC ID\n");
1746 if (err & (1 << 13))
1747 DSSERR("\t\tInvalid Transmission Length\n");
1748 if (err & (1 << 14))
1749 DSSERR("\t\t(reserved14)\n");
1750 if (err & (1 << 15))
1751 DSSERR("\t\tDSI Protocol Violation\n");
/* Drain and log everything waiting in a VC's RX FIFO: decode each
 * packet header by data type (ACK-with-error, short read 1/2 bytes,
 * DCS long read), flushing long-read payloads as needed. */
1754 static u16 dsi_vc_flush_receive_data(int channel)
1756 /* RX_FIFO_NOT_EMPTY */
1757 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1760 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1761 DSSDBG("\trawval %#08x\n", val);
/* Bits 5:0 of the header hold the data type. */
1762 dt = FLD_GET(val, 5, 0);
1763 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
1764 u16 err = FLD_GET(val, 23, 8);
1765 dsi_show_rx_ack_with_err(err);
1766 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
1767 DSSDBG("\tDCS short response, 1 byte: %#x\n",
1768 FLD_GET(val, 23, 8));
1769 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
1770 DSSDBG("\tDCS short response, 2 byte: %#x\n",
1771 FLD_GET(val, 23, 8));
1772 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
1773 DSSDBG("\tDCS long response, len %d\n",
1774 FLD_GET(val, 23, 8));
1775 dsi_vc_flush_long_data(channel);
1777 DSSERR("\tunknown datatype 0x%02x\n", dt);
/* Issue a Bus Turn-Around request on the given VC. Flushes any stale RX
 * data first, then sets BTA_EN and busy-waits (up to ~10 ms) for the
 * hardware to clear it. */
1783 static int dsi_vc_send_bta(int channel)
1787 /*DSSDBG("dsi_vc_send_bta_sync %d\n", channel); */
1789 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
1790 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
1791 dsi_vc_flush_receive_data(channel);
1794 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
1796 tmo = jiffies + msecs_to_jiffies(10);
1797 while (REG_GET(DSI_VC_CTRL(channel), 6, 6) == 1) {
1798 if (time_after(jiffies, tmo)) {
1799 DSSERR("Failed to send BTA\n");
/* Synchronous BTA: arm the BTA-done IRQ and completion, send the BTA,
 * then sleep (up to 500 ms) until the peripheral responds. */
1807 static int dsi_vc_send_bta_sync(int channel)
1811 init_completion(&dsi.bta_completion);
1813 dsi_vc_enable_bta_irq(channel);
1815 r = dsi_vc_send_bta(channel);
1819 if (wait_for_completion_timeout(&dsi.bta_completion,
1820 msecs_to_jiffies(500)) == 0) {
1821 DSSERR("Failed to receive BTA\n");
/* Always disarm the IRQ, success or failure. */
1826 dsi_vc_disable_bta_irq(channel);
/* Write a long-packet header: data ID (type + destination peripheral in
 * bits 7:6), 16-bit word count, and ECC byte. */
1831 static inline void dsi_vc_write_long_header(int channel, u8 data_type,
1837 /*data_id = data_type | channel << 6; */
1838 data_id = data_type | dsi.vc[channel].dest_per << 6;
1840 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
1841 FLD_VAL(ecc, 31, 24);
1843 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
/* Pack four payload bytes little-endian (b1 = lowest byte) into one
 * 32-bit word and push it to the VC's long-packet payload register. */
1846 static inline void dsi_vc_write_long_payload(int channel,
1847 u8 b1, u8 b2, u8 b3, u8 b4)
1851 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
1853 /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
1854 b1, b2, b3, b4, val); */
1856 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
/* Send a long packet: header + payload pushed word-by-word through the
 * TX FIFO. Rejects packets bigger than the VC's FIFO slice
 * (fifo_size * 32 words * 4 bytes). The byte-extraction lines for the
 * full words and the remainder handling (orig. 1886-1916) were dropped
 * by the extraction. */
1859 static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
1868 if (dsi.debug_write)
1869 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
1872 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
1873 DSSERR("unable to send long packet: packet too long.\n");
1877 dsi_vc_write_long_header(channel, data_type, len, ecc);
1879 /*dsi_vc_print_status(0); */
/* Full 4-byte words first... */
1882 for (i = 0; i < len >> 2; i++) {
1883 if (dsi.debug_write)
1884 DSSDBG("\tsending full packet %d\n", i);
1885 /*dsi_vc_print_status(0); */
1892 dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
/* ...then the 1-3 remaining bytes, zero-padded. */
1897 b1 = 0; b2 = 0; b3 = 0;
1899 if (dsi.debug_write)
1900 DSSDBG("\tsending remainder bytes %d\n", i);
1917 dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
/* Send a short packet (data type + 16-bit payload + ECC) through the
 * short-packet header register. Bails out if the TX FIFO is full. */
1923 static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
1928 if (dsi.debug_write)
1929 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
1931 data_type, data & 0xff, (data >> 8) & 0xff)
1933 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
1934 DSSERR("ERROR FIFO FULL, aborting transfer\n");
/* Short packets encode the VC directly in the data ID (bits 7:6). */
1938 data_id = data_type | channel << 6;
1940 r = (data_id << 0) | (data << 8) | (ecc << 24);
1942 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
1947 int dsi_vc_send_null(int channel)
1949 u8 nullpkg[] = {0, 0, 0, 0};
1950 return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
1952 EXPORT_SYMBOL(dsi_vc_send_null);
/* Write a DCS command without waiting for completion: 1 byte maps to a
 * DCS short write with no parameter, 2 bytes to a short write with one
 * parameter, anything longer to a DCS long write. */
1954 int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
1961 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
1963 } else if (len == 2) {
1964 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
1965 data[0] | (data[1] << 8), 0);
1967 /* 0x39 = DCS Long Write */
1968 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
1974 EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
/* Synchronous DCS write: issue the write, then a BTA to let the
 * peripheral process it and to confirm it is still responsive. */
1976 int dsi_vc_dcs_write(int channel, u8 *data, int len)
1980 r = dsi_vc_dcs_write_nosync(channel, data, len);
1984 /* Some devices need time to process the msg in low power mode.
1985 This also makes the write synchronous, and checks that
1986 the peripheral is still alive */
1987 r = dsi_vc_send_bta_sync(channel);
1991 EXPORT_SYMBOL(dsi_vc_dcs_write);
/* DCS read: send the read command, BTA to turn the bus around, then
 * parse the response header — error report, 1-/2-byte short response,
 * or long response whose payload (plus 2-byte checksum, discarded) is
 * copied into buf. Several buflen checks and return statements were
 * dropped by the extraction. */
1993 int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2000 DSSDBG("dsi_vc_dcs_read\n");
2002 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2006 r = dsi_vc_send_bta_sync(channel);
2010 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { /* RX_FIFO_NOT_EMPTY */
2011 DSSERR("RX fifo empty when trying to read.\n");
2015 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2017 DSSDBG("\theader: %08x\n", val);
2018 dt = FLD_GET(val, 5, 0);
2019 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2020 u16 err = FLD_GET(val, 23, 8);
2021 dsi_show_rx_ack_with_err(err);
2024 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2025 u8 data = FLD_GET(val, 15, 8);
2027 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2035 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2036 u16 data = FLD_GET(val, 23, 8);
2038 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2043 buf[0] = data & 0xff;
2044 buf[1] = (data >> 8) & 0xff;
2047 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2049 int len = FLD_GET(val, 23, 8);
2051 DSSDBG("\tDCS long response, len %d\n", len);
2056 /* two byte checksum ends the packet, not included in len */
2057 for (w = 0; w < len + 2;) {
2059 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2061 DSSDBG("\t\t%02x %02x %02x %02x\n",
2065 (val >> 24) & 0xff);
/* Unpack each 32-bit FIFO word LSB-first into the caller's buffer. */
2067 for (b = 0; b < 4; ++b) {
2069 buf[w] = (val >> (b * 8)) & 0xff;
2070 /* we discard the 2 byte checksum */
2078 DSSERR("\tunknown datatype 0x%02x\n", dt);
2082 EXPORT_SYMBOL(dsi_vc_dcs_read);
/* Tell the peripheral the maximum packet size it may return, via the
 * Set Maximum Return Packet Size short command. */
2085 int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
2087 return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2090 EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
/* Program the low-power RX timeout counter in DSI_TIMING2, counted in
 * DSI functional-clock ticks with optional x4/x16 prescalers.
 * NOTE(review): the error string says "LP_TX_TO" although the field
 * being programmed is LP_RX_TO — looks like a copy/paste slip; confirm
 * before changing a log string others may grep for. */
2093 static int dsi_set_lp_rx_timeout(int ns, int x4, int x16)
2099 /* ticks in DSI_FCK */
2101 fck = dsi_fclk_rate();
2102 ticks = (fck / 1000 / 1000) * ns / 1000;
/* 13-bit counter field; reject values that don't fit. */
2104 if (ticks > 0x1fff) {
2105 DSSERR("LP_TX_TO too high\n");
2109 r = dsi_read_reg(DSI_TIMING2);
2110 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2111 r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
2112 r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
2113 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2114 dsi_write_reg(DSI_TIMING2, r);
2116 DSSDBG("LP_RX_TO %ld ns (%#x ticks)\n",
2117 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2118 (fck / 1000 / 1000),
/* Program the turn-around timeout counter in DSI_TIMING1, counted in
 * DSI functional-clock ticks with optional x8/x16 prescalers. */
2124 static int dsi_set_ta_timeout(int ns, int x8, int x16)
2130 /* ticks in DSI_FCK */
2132 fck = dsi_fclk_rate();
2133 ticks = (fck / 1000 / 1000) * ns / 1000;
2135 if (ticks > 0x1fff) {
2136 DSSERR("TA_TO too high\n");
2140 r = dsi_read_reg(DSI_TIMING1);
2141 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2142 r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
2143 r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
2144 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2145 dsi_write_reg(DSI_TIMING1, r);
2147 DSSDBG("TA_TO %ld ns (%#x ticks)\n",
2148 (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
2149 (fck / 1000 / 1000),
/* Program the stop-state counter (time lanes stay in stop mode before a
 * transmission) in DSI_TIMING1, with optional x4/x16 prescalers. */
2155 static int dsi_set_stop_state_counter(int ns, int x4, int x16)
2161 /* ticks in DSI_FCK */
2163 fck = dsi_fclk_rate();
2164 ticks = (fck / 1000 / 1000) * ns / 1000;
2166 if (ticks > 0x1fff) {
2167 DSSERR("STOP_STATE_COUNTER_IO too high\n");
2171 r = dsi_read_reg(DSI_TIMING1);
2172 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2173 r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
2174 r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
2175 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2176 dsi_write_reg(DSI_TIMING1, r);
2178 DSSDBG("STOP_STATE_COUNTER %ld ns (%#x ticks)\n",
2179 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2180 (fck / 1000 / 1000),
/* Program the high-speed TX timeout in DSI_TIMING2. Unlike the other
 * timeouts this one counts TxByteClkHS ticks (ddr_clk / 4), not DSI_FCK. */
2186 static int dsi_set_hs_tx_timeout(int ns, int x4, int x16)
2192 /* ticks in TxByteClkHS */
2194 fck = dsi.ddr_clk / 4;
2195 ticks = (fck / 1000 / 1000) * ns / 1000;
2197 if (ticks > 0x1fff) {
2198 DSSERR("HS_TX_TO too high\n");
2202 r = dsi_read_reg(DSI_TIMING2);
2203 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2204 r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
2205 r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
2206 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2207 dsi_write_reg(DSI_TIMING2, r);
2209 DSSDBG("HS_TX_TO %ld ns (%#x ticks)\n",
2210 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2211 (fck / 1000 / 1000),
/* One-time protocol engine setup: FIFO partitioning, the four timeout
 * counters, pixel-format-dependent bus width, DSI_CTRL feature bits,
 * and VC roles (VC0 = L4 commands, VC1 = video port). The switch cases
 * mapping pixel_size to buswidth (orig. 2244-2255) were dropped by the
 * extraction. */
2216 static int dsi_proto_config(struct omap_display *display)
2222 dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
2227 dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
2232 /* XXX what values for the timeouts? */
2233 dsi_set_stop_state_counter(1000, 0, 0);
2235 dsi_set_ta_timeout(50000, 1, 1);
2238 dsi_set_lp_rx_timeout(3000, 0, 1);
2241 dsi_set_hs_tx_timeout(10000, 1, 0);
2243 switch (display->ctrl->pixel_size) {
2257 r = dsi_read_reg(DSI_CTRL);
2258 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2259 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2260 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
/* VP_CLK_RATIO derives from the DISPC logic/pixel clock divider. */
2262 div = dispc_lclk_rate() / dispc_pclk_rate();
2263 r = FLD_MOD(r, div == 2 ? 0 : 1, 4, 4); /* VP_CLK_RATIO */
2264 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
2265 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
2266 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2267 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2268 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
2269 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
2270 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
2272 dsi_write_reg(DSI_CTRL, r);
2274 /* we configure vc0 for L4 communication, and
2277 dsi_vc_config_vp(1);
2279 /* set all vc targets to peripheral 0 */
2280 dsi.vc[0].dest_per = 0;
2281 dsi.vc[1].dest_per = 0;
2282 dsi.vc[2].dest_per = 0;
2283 dsi.vc[3].dest_per = 0;
/* Derive DDR_CLK_PRE/POST (clock-lane lead-in/lead-out around HS data)
 * from the lane timings programmed by dsi_complexio_timings(), and
 * write them to DSI_CLK_TIMING.
 * NOTE(review): the assignment of tclk_pre (orig. ~2303) was dropped by
 * the extraction; it is read uninitialized as the text stands. */
2288 static void dsi_proto_timings(void)
2290 int tlpx_half, tclk_zero, tclk_prepare, tclk_trail;
2291 int tclk_pre, tclk_post;
2292 int ddr_clk_pre, ddr_clk_post;
2295 r = dsi_read_reg(DSI_DSIPHY_CFG1);
2296 tlpx_half = FLD_GET(r, 22, 16);
2297 tclk_trail = FLD_GET(r, 15, 8);
2298 tclk_zero = FLD_GET(r, 7, 0);
2300 r = dsi_read_reg(DSI_DSIPHY_CFG2);
2301 tclk_prepare = FLD_GET(r, 7, 0);
2305 /* min 60ns + 52*UI */
2306 tclk_post = ns2ddr(60) + 26;
/* Divide by 4 to convert DDR ticks to byte-clock ticks. */
2308 ddr_clk_pre = (tclk_pre + tlpx_half*2 + tclk_zero + tclk_prepare) / 4;
2309 ddr_clk_post = (tclk_post + tclk_trail) / 4;
2311 r = dsi_read_reg(DSI_CLK_TIMING);
2312 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2313 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2314 dsi_write_reg(DSI_CLK_TIMING, r);
2316 DSSDBG("ddr_clk_pre %d, ddr_clk_post %d\n",
/* Byte-accumulator helpers for streaming pixel data to a VC's long
 * packet payload register. DSI_DECL_VARS declares the accumulator
 * (__dsi_cv, little-endian) and byte count (__dsi_cb); DSI_PUSH adds a
 * byte and flushes on a full word; DSI_FLUSH writes out any partial
 * word. (Comments are placed only between complete macros so as not to
 * disturb the backslash continuations.) */
2322 #define DSI_DECL_VARS \
2323 int __dsi_cb = 0; u32 __dsi_cv = 0;
2325 #define DSI_FLUSH(ch) \
2326 if (__dsi_cb > 0) { \
2327 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2328 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2329 __dsi_cb = __dsi_cv = 0; \
2332 #define DSI_PUSH(ch, data) \
2334 __dsi_cv |= (data) << (__dsi_cb * 8); \
2335 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2336 if (++__dsi_cb > 3) \
/* CPU-driven (L4) partial update: read RGB24-in-32bit pixels from the
 * overlay's virtual address and push them to VC0 as DCS long writes
 * (write_memory_start for the first packet, write_memory_continue
 * thereafter), stalling on TX FIFO pressure. Only supports 24-bit color
 * in a 32-bit container. Several loop-tail lines (per-row advance,
 * DSI_FLUSH, returns) were dropped by the extraction. */
2340 static int dsi_update_screen_l4(struct omap_display *display,
2341 int x, int y, int w, int h)
2343 /* Note: supports only 24bit colors in 32bit container */
2345 int fifo_stalls = 0;
2346 int max_dsi_packet_size;
2347 int max_data_per_packet;
2348 int max_pixels_per_packet;
2356 struct omap_overlay *ovl;
2360 DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
2363 ovl = display->manager->overlays[0];
2365 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
2368 if (display->ctrl->pixel_size != 24)
2371 scr_width = ovl->info.screen_width;
2372 data = ovl->info.vaddr;
2374 start_offset = scr_width * y + x;
2375 horiz_inc = scr_width - w;
2378 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
2381 /* When using CPU, max long packet size is TX buffer size */
2382 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
2384 /* we seem to get better perf if we divide the tx fifo to half,
2385 and while the other half is being sent, we fill the other half
2386 max_dsi_packet_size /= 2; */
2388 max_data_per_packet = max_dsi_packet_size - 4 - 1;
2390 max_pixels_per_packet = max_data_per_packet / bytespp;
2392 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
2394 display->ctrl->setup_update(display, x, y, w, h);
2396 pixels_left = w * h;
2398 DSSDBG("total pixels %d\n", pixels_left);
2400 data += start_offset;
/* Record the region for the framedone path. */
2403 dsi.update_region.x = x;
2404 dsi.update_region.y = y;
2405 dsi.update_region.w = w;
2406 dsi.update_region.h = h;
2407 dsi.update_region.bytespp = bytespp;
2412 while (pixels_left > 0) {
2413 /* 0x2c = write_memory_start */
2414 /* 0x3c = write_memory_continue */
2415 u8 dcs_cmd = first ? 0x2c : 0x3c;
2421 /* using fifo not empty */
2422 /* TX_FIFO_NOT_EMPTY */
2423 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2426 if (fifo_stalls > 0xfffff) {
2427 DSSERR("fifo stalls overflow, pixels left %d\n",
2434 /* using fifo emptiness */
2435 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2436 max_dsi_packet_size) {
2438 if (fifo_stalls > 0xfffff) {
2439 DSSERR("fifo stalls overflow, pixels left %d\n",
2446 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
2448 if (fifo_stalls > 0xfffff) {
2449 DSSERR("fifo stalls overflow, pixels left %d\n",
2456 pixels = min(max_pixels_per_packet, pixels_left);
2458 pixels_left -= pixels;
2460 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
2461 1 + pixels * bytespp, 0);
2463 DSI_PUSH(0, dcs_cmd);
2465 while (pixels-- > 0) {
2466 u32 pix = __raw_readl(data++);
/* Emit R, G, B (high byte first) from the 32-bit container. */
2468 DSI_PUSH(0, (pix >> 16) & 0xff);
2469 DSI_PUSH(0, (pix >> 8) & 0xff);
2470 DSI_PUSH(0, (pix >> 0) & 0xff);
2473 if (current_x == x+w) {
/* CPU-driven clear: same packetization as dsi_update_screen_l4() but
 * pushes a constant pixel value instead of reading framebuffer memory.
 * The pixel-value setup and loop-tail lines were dropped by the
 * extraction. */
2488 static void dsi_clear_screen_l4(struct omap_display *display,
2489 int x, int y, int w, int h)
2492 int fifo_stalls = 0;
2493 int max_dsi_packet_size;
2494 int max_data_per_packet;
2495 int max_pixels_per_packet;
2502 DSSDBG("dsi_clear_screen_l4 (%d,%d %dx%d)\n",
2505 if (display->ctrl->bpp != 24)
2508 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp)
2511 /* When using CPU, max long packet size is TX buffer size */
2512 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
2514 max_data_per_packet = max_dsi_packet_size - 4 - 1;
2516 max_pixels_per_packet = max_data_per_packet / bytespp;
2520 display->ctrl->setup_update(display, x, y, w, h);
2522 pixels_left = w * h;
2524 dsi.update_region.x = x;
2525 dsi.update_region.y = y;
2526 dsi.update_region.w = w;
2527 dsi.update_region.h = h;
2528 dsi.update_region.bytespp = bytespp;
2534 while (pixels_left > 0) {
2535 /* 0x2c = write_memory_start */
2536 /* 0x3c = write_memory_continue */
2537 u8 dcs_cmd = first ? 0x2c : 0x3c;
2542 /* TX_FIFO_NOT_EMPTY */
2543 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2545 if (fifo_stalls > 0xfffff) {
2546 DSSERR("fifo stalls overflow\n");
2553 pixels = min(max_pixels_per_packet, pixels_left);
2555 pixels_left -= pixels;
2557 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
2558 1 + pixels * bytespp, 0);
2560 DSI_PUSH(0, dcs_cmd);
2562 while (pixels-- > 0) {
2567 DSI_PUSH(0, (pix >> 16) & 0xff);
2568 DSI_PUSH(0, (pix >> 8) & 0xff);
2569 DSI_PUSH(0, (pix >> 0) & 0xff);
2577 end_measuring("L4 CLEAR");
/* Prepare DISPC for a partial update: record the region, let DISPC clip
 * the planes (which may grow x/y/w/h), and set the LCD size to match. */
2581 static void dsi_setup_update_dispc(struct omap_display *display,
2582 u16 x, u16 y, u16 w, u16 h)
2584 DSSDBG("dsi_setup_update_dispc(%d,%d %dx%d)\n",
2588 dsi.update_region.x = x;
2589 dsi.update_region.y = y;
2590 dsi.update_region.w = w;
2591 dsi.update_region.h = h;
2592 dsi.update_region.bytespp = 3; // XXX
/* dispc_setup_partial_planes() may adjust the region in place. */
2595 dispc_setup_partial_planes(display, &x, &y, &w, &h);
2597 dispc_set_lcd_size(w, h);
/* Prepare DISPC for full-screen auto update: region covers the whole
 * display, pending overlay settings are applied, and the one-shot
 * autoupdate_setup flag is cleared. */
2600 static void dsi_setup_autoupdate_dispc(struct omap_display *display)
2604 display->get_resolution(display, &w, &h);
2607 dsi.update_region.x = 0;
2608 dsi.update_region.y = 0;
2609 dsi.update_region.w = w;
2610 dsi.update_region.h = h;
2611 dsi.update_region.bytespp = 3; // XXX
2614 /* the overlay settings may not have been applied, if we were in manual
2615 * mode earlier, so do it here */
2616 display->manager->apply(display->manager);
2618 dispc_set_lcd_size(w, h);
2620 dsi.autoupdate_setup = 0;
/* DISPC-driven update: compute the total transfer size including one
 * DCS command byte per packet, program TE_SIZE and the long-packet
 * header on VC1, arm TE_EN/TE_START, then kick DISPC to stream pixels. */
2623 static void dsi_update_screen_dispc(struct omap_display *display,
2624 u16 x, u16 y, u16 w, u16 h)
2633 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
2634 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
2637 len = w * h * bytespp;
2639 /* XXX: one packet could be longer, I think? Line buffer is
2640 * 1024 x 24bits, but we have to put DCS cmd there also.
2641 * 1023 * 3 should work, but causes strange color effects. */
2642 packet_payload = min(w, (u16)1020) * bytespp;
2644 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
2645 total_len = (len / packet_payload) * packet_len;
/* Account for a final short packet if len isn't a payload multiple. */
2647 if (len % packet_payload)
2648 total_len += (len % packet_payload) + 1;
2650 display->ctrl->setup_update(display, x, y, w, h);
2653 dsi_vc_print_status(1);
2657 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
2658 dsi_write_reg(DSI_VC_TE(1), l);
2660 dsi_vc_write_long_header(1, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
2663 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
2665 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
2666 dsi_write_reg(DSI_VC_TE(1), l);
/* Keep DISPC out of idle while it streams to DSI. */
2668 dispc_disable_sidle();
2670 dispc_enable_lcd_out(1);
/* DISPC FRAMEDONE IRQ handler (runs in interrupt context): re-enable
 * DISPC smart idle and defer the rest to framedone_worker, since DSI is
 * still draining its buffers when this fires. */
2676 static void framedone_callback(void *data, u32 mask)
2678 if (dsi.framedone_scheduled) {
2679 DSSERR("Framedone already scheduled. Bogus FRAMEDONE IRQ?\n");
2683 dispc_enable_sidle();
2685 dsi.framedone_scheduled = 1;
2687 /* We get FRAMEDONE when DISPC has finished sending pixels and turns
2688 * itself off. However, DSI still has the pixels in its buffers, and
2689 * is sending the data. Thus we have to wait until we can do a new
2690 * transfer or turn the clocks off. We do that in a separate work
2692 queue_work(dsi.workqueue, &dsi.framedone_work);
/* Deferred half of FRAMEDONE handling: wait (up to ~50 ms) for TE_SIZE
 * to reach zero, i.e. for DSI to finish draining the frame, sanity-check
 * TE_EN/TE_START, then requeue the next auto update and restart command
 * FIFO processing. */
2695 static void framedone_worker(struct work_struct *work)
2701 l = REG_GET(DSI_VC_TE(1), 23, 0); /* TE_SIZE */
2703 /* There shouldn't be much stuff in DSI buffers, if any, so we'll
2706 tmo = jiffies + msecs_to_jiffies(50);
2707 while (REG_GET(DSI_VC_TE(1), 23, 0) > 0) { /* TE_SIZE */
2709 if (time_after(jiffies, tmo)) {
2710 DSSERR("timeout waiting TE_SIZE to zero\n");
2717 if (REG_GET(DSI_VC_TE(1), 30, 30))
2718 DSSERR("TE_EN not zero\n");
2720 if (REG_GET(DSI_VC_TE(1), 31, 31))
2721 DSSERR("TE_START not zero\n");
2725 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
2726 DSSDBG("FRAMEDONE\n");
2730 DSSWARN("FRAMEDONE irq too early, %d bytes, %d loops\n", l, i);
2733 DSSWARN("FRAMEDONE irq too early, %d bytes, %d loops\n", l, i);
2736 #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
2737 dispc_fake_vsync_irq();
2739 dsi.framedone_scheduled = 0;
2741 /* XXX check that fifo is not full. otherwise we would sleep and never
2742 * get to process_cmd_fifo below */
2743 /* We check for target_update_mode, not update_mode. No reason to push
2744 * new updates if we're turning auto update off */
2745 if (dsi.target_update_mode == OMAP_DSS_UPDATE_AUTO)
2746 dsi_push_autoupdate(dsi.vc[1].display);
2748 atomic_set(&dsi.cmd_pending, 0);
2749 dsi_process_cmd_fifo(NULL);
/* Kick off auto-update mode: request a one-time DISPC setup on the next
 * update, then push the first AUTOUPDATE command into the fifo. */
2752 static void dsi_start_auto_update(struct omap_display *display)
2754 DSSDBG("starting auto update\n")
2756 dsi.autoupdate_setup = 1;
2758 dsi_push_autoupdate(display);
2773 /* FIFO functions */
/* Wake one producer blocked in dsi_push_cmd() waiting for space in the
 * command fifo (cmd_fifo_full counts waiters). */
2775 static void dsi_signal_fifo_waiters(void)
2777 if (atomic_read(&dsi.cmd_fifo_full) > 0) {
2778 DSSDBG("SIGNALING: Fifo not full for waiter!\n");
2779 complete(&dsi.cmd_done);
2780 atomic_dec(&dsi.cmd_fifo_full);
2784 /* returns 1 for async op, and 0 for sync op */
/* Execute one UPDATE command: validate display state and region against
 * the panel resolution, then take the DISPC path (async, completes in
 * framedone_worker) or the CPU/L4 path (sync). */
2785 static int dsi_do_update(struct omap_display *display,
2786 struct dsi_cmd_update *upd)
2789 u16 x = upd->x, y = upd->y, w = upd->w, h = upd->h;
2792 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
2795 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
2798 display->get_resolution(display, &dw, &dh);
2799 if (x > dw || y > dh)
2808 DSSDBGF("%d,%d %dx%d", x, y, w, h);
2812 if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2813 dsi_setup_update_dispc(display, x, y, w, h);
2814 dsi_update_screen_dispc(display, x, y, w, h);
2817 r = dsi_update_screen_l4(display, x, y, w, h);
2819 DSSERR("L4 update failed\n");
2824 /* returns 1 for async op, and 0 for sync op */
/* Execute one AUTOUPDATE command: full-screen variant of dsi_do_update,
 * with a lazy one-time DISPC setup gated by dsi.autoupdate_setup. */
2825 static int dsi_do_autoupdate(struct omap_display *display)
2830 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
2833 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
2836 display->get_resolution(display, &w, &h);
2840 if (display->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2841 if (dsi.autoupdate_setup)
2842 dsi_setup_autoupdate_dispc(display);
2843 dsi_update_screen_dispc(display, 0, 0, w, h);
2846 r = dsi_update_screen_l4(display, 0, 0, w, h);
2848 DSSERR("L4 update failed\n");
/* Execute a MEM_READ command: delegate to the panel controller's
 * memory_read hook, report the size back and signal the waiter. */
2853 static void dsi_do_cmd_mem_read(struct omap_display *display,
2854 struct dsi_cmd_mem_read *mem_read)
2857 r = display->ctrl->memory_read(display,
2865 *mem_read->ret_size = (size_t)r;
2866 complete(mem_read->completion);
/* Execute a TEST command: run the controller's and panel's self-tests
 * first in low-power mode, then again in high-speed mode, and complete
 * the waiter. Error-path lines were dropped by the extraction. */
2869 static void dsi_do_cmd_test(struct omap_display *display,
2870 struct dsi_cmd_test *test)
2876 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
2879 /* run test first in low speed mode */
2880 dsi_vc_enable_hs(0, 0);
2882 if (display->ctrl->run_test) {
2883 r = display->ctrl->run_test(display, test->test_num);
2888 if (display->panel->run_test) {
2889 r = display->panel->run_test(display, test->test_num);
2894 /* then in high speed */
2895 dsi_vc_enable_hs(0, 1);
2897 if (display->ctrl->run_test) {
2898 r = display->ctrl->run_test(display, test->test_num);
2903 if (display->panel->run_test)
2904 r = display->panel->run_test(display, test->test_num);
/* Leave VC0 in high-speed mode on exit. */
2907 dsi_vc_enable_hs(0, 1);
2910 complete(test->completion);
2912 DSSDBG("test end\n");
/* Execute a SET_TE command: record the tearing-effect preference, tell
 * the panel controller, and gate LP_RX_TO accordingly (TE waits exceed
 * the timeout counter's range). */
2915 static void dsi_do_cmd_set_te(struct omap_display *display, bool enable)
2917 dsi.use_te = enable;
2919 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
2922 display->ctrl->enable_te(display, enable);
2925 /* disable LP_RX_TO, so that we can receive TE.
2926 * Time to wait for TE is longer than the timer allows */
2927 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
2929 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
/* Execute a SET_UPDATE_MODE command: store the mode and, if switching
 * to AUTO on an active display, start the auto-update cycle. */
2933 static void dsi_do_cmd_set_update_mode(struct omap_display *display,
2934 enum omap_dss_update_mode mode)
2936 dsi.update_mode = mode;
2938 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
2941 if (mode == OMAP_DSS_UPDATE_AUTO)
2942 dsi_start_auto_update(display);
/* Workqueue consumer of the command fifo. cmd_pending serializes
 * processing (set here, cleared when the fifo drains or when an async
 * update completes in framedone_worker). Pops one item under the fifo
 * spinlock and dispatches on its cmd type; async updates return early
 * leaving cmd_pending set. Waiters are signalled on exit. */
2945 static void dsi_process_cmd_fifo(struct work_struct *work)
2948 struct dsi_cmd_item p;
2949 unsigned long flags;
2950 struct omap_display *display;
2953 if (dsi.debug_process)
/* Only one processing pass at a time; bail if one is in flight. */
2956 if (atomic_cmpxchg(&dsi.cmd_pending, 0, 1) == 1) {
2957 if (dsi.debug_process)
2958 DSSDBG("cmd pending, skip process\n");
2963 spin_lock_irqsave(dsi.cmd_fifo->lock, flags);
2965 len = __kfifo_get(dsi.cmd_fifo, (unsigned char *)&p,
2968 if (dsi.debug_process)
2969 DSSDBG("nothing more in fifo, atomic clear\n");
2970 atomic_set(&dsi.cmd_pending, 0);
2971 spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags);
2975 spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags);
2977 BUG_ON(len != sizeof(p));
2979 display = p.display;
2981 if (dsi.debug_process)
2982 DSSDBG("processing cmd %d\n", p.cmd);
2985 case DSI_CMD_UPDATE:
2986 if (dsi_do_update(display, &p.u.r)) {
2987 if (dsi.debug_process)
2988 DSSDBG("async update\n");
2991 if (dsi.debug_process)
2992 DSSDBG("sync update\n");
2996 case DSI_CMD_AUTOUPDATE:
2997 if (dsi_do_autoupdate(display)) {
2998 if (dsi.debug_process)
2999 DSSDBG("async autoupdate\n");
3002 if (dsi.debug_process)
3003 DSSDBG("sync autoupdate\n");
3008 if (dsi.debug_process)
3009 DSSDBG("Signaling SYNC done!\n");
3013 case DSI_CMD_MEM_READ:
3014 dsi_do_cmd_mem_read(display, &p.u.mem_read);
3018 dsi_do_cmd_test(display, &p.u.test);
3021 case DSI_CMD_SET_TE:
3022 dsi_do_cmd_set_te(display, p.u.te);
3025 case DSI_CMD_SET_UPDATE_MODE:
3026 dsi_do_cmd_set_update_mode(display, p.u.update_mode);
3029 case DSI_CMD_SET_ROTATE:
3030 display->ctrl->set_rotate(display, p.u.rotate);
3031 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO)
3032 dsi.autoupdate_setup = 1;
3035 case DSI_CMD_SET_MIRROR:
3036 display->ctrl->set_mirror(display, p.u.mirror);
3044 if (dsi.debug_process)
3045 DSSDBG("exit dsi_process_cmd_fifo\n");
3047 dsi_signal_fifo_waiters();
/* Producer side of the command fifo: under the fifo spinlock, check for
 * a free slot; if full, register as a waiter (cmd_fifo_full) and sleep
 * on cmd_done before retrying. Once there is room, put the item and
 * queue the processing work. The enclosing retry loop's head/tail lines
 * were dropped by the extraction. */
3050 static void dsi_push_cmd(struct dsi_cmd_item *p)
3054 if (dsi.debug_process)
3058 unsigned long flags;
3059 unsigned avail, used;
3061 spin_lock_irqsave(dsi.cmd_fifo->lock, flags);
3062 used = __kfifo_len(dsi.cmd_fifo) / sizeof(struct dsi_cmd_item);
3063 avail = DSI_CMD_FIFO_LEN - used;
3065 if (dsi.debug_process)
3066 DSSDBG("%u/%u items left in fifo\n", avail, used);
3069 if (dsi.debug_process)
3070 DSSDBG("cmd fifo full, waiting...\n");
3071 spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags);
3072 atomic_inc(&dsi.cmd_fifo_full);
3073 wait_for_completion(&dsi.cmd_done);
3074 if (dsi.debug_process)
3075 DSSDBG("cmd fifo not full, woke up\n");
3079 ret = __kfifo_put(dsi.cmd_fifo, (unsigned char *)p,
3082 spin_unlock_irqrestore(dsi.cmd_fifo->lock, flags);
3084 BUG_ON(ret != sizeof(*p));
3089 queue_work(dsi.workqueue, &dsi.process_work);
/*
 * dsi_push_update() - queue a DSI_CMD_UPDATE for the rectangle (x,y,w,h).
 * NOTE(review): the elided lines presumably fill p.u.r with the rectangle
 * and call dsi_push_cmd(&p) — confirm against the original file.
 */
3092 static void dsi_push_update(struct omap_display *display,
3093 int x, int y, int w, int h)
3095 struct dsi_cmd_item p;
3097 p.display = display;
3098 p.cmd = DSI_CMD_UPDATE;
3105 DSSDBG("pushing UPDATE %d,%d %dx%d\n", x, y, w, h);
/*
 * dsi_push_autoupdate() - queue a DSI_CMD_AUTOUPDATE item for @display.
 * NOTE(review): the trailing dsi_push_cmd(&p) call is elided in this
 * extraction — confirm against the original file.
 */
3110 static void dsi_push_autoupdate(struct omap_display *display)
3112 struct dsi_cmd_item p;
3114 p.display = display;
3115 p.cmd = DSI_CMD_AUTOUPDATE;
/*
 * dsi_push_sync() - queue a DSI_CMD_SYNC item carrying @sync_comp; the
 * worker completes it once all previously queued commands have been
 * processed (see the SYNC handling in dsi_process_cmd_fifo).
 */
3120 static void dsi_push_sync(struct omap_display *display,
3121 struct completion *sync_comp)
3123 struct dsi_cmd_item p;
3125 p.display = display;
3126 p.cmd = DSI_CMD_SYNC;
3127 p.u.sync = sync_comp;
3129 DSSDBG("pushing SYNC\n");
/*
 * dsi_push_mem_read() - queue a DSI_CMD_MEM_READ item. The mem_read
 * descriptor is copied by value, but it contains pointers (buf, ret_size,
 * completion) that the caller must keep alive until the worker completes
 * the request (dsi_display_memory_read() waits on the completion).
 */
3134 static void dsi_push_mem_read(struct omap_display *display,
3135 struct dsi_cmd_mem_read *mem_read)
3137 struct dsi_cmd_item p;
3139 p.display = display;
3140 p.cmd = DSI_CMD_MEM_READ;
3141 p.u.mem_read = *mem_read;
3143 DSSDBG("pushing MEM_READ\n");
/*
 * dsi_push_test() - queue a DSI_CMD_TEST item. @result and @completion
 * are caller-owned; the worker writes the outcome through @result and
 * signals @completion (dsi_display_run_test() waits on it).
 */
3148 static void dsi_push_test(struct omap_display *display, int test_num,
3149 int *result, struct completion *completion)
3151 struct dsi_cmd_item p;
3153 p.display = display;
3154 p.cmd = DSI_CMD_TEST;
3155 p.u.test.test_num = test_num;
3156 p.u.test.result = result;
3157 p.u.test.completion = completion;
3159 DSSDBG("pushing TEST\n");
/*
 * dsi_push_set_te() - queue a DSI_CMD_SET_TE item to enable/disable
 * tearing-effect synchronization. NOTE(review): the p.u.te assignment is
 * elided in this extraction — confirm against the original file.
 */
3164 static void dsi_push_set_te(struct omap_display *display, bool enable)
3166 struct dsi_cmd_item p;
3168 p.display = display;
3169 p.cmd = DSI_CMD_SET_TE;
3172 DSSDBG("pushing SET_TE\n");
/*
 * dsi_push_set_update_mode() - queue a DSI_CMD_SET_UPDATE_MODE item
 * switching the display to @mode (manual/auto/disabled).
 */
3177 static void dsi_push_set_update_mode(struct omap_display *display,
3178 enum omap_dss_update_mode mode)
3180 struct dsi_cmd_item p;
3182 p.display = display;
3183 p.cmd = DSI_CMD_SET_UPDATE_MODE;
3184 p.u.update_mode = mode;
3186 DSSDBG("pushing SET_UPDATE_MODE\n");
/*
 * dsi_push_set_rotate() - queue a DSI_CMD_SET_ROTATE item; the worker
 * forwards @rotate to the panel controller's set_rotate hook.
 */
3191 static void dsi_push_set_rotate(struct omap_display *display, int rotate)
3193 struct dsi_cmd_item p;
3195 p.display = display;
3196 p.cmd = DSI_CMD_SET_ROTATE;
3197 p.u.rotate = rotate;
3199 DSSDBG("pushing SET_ROTATE\n");
/*
 * dsi_push_set_mirror() - queue a DSI_CMD_SET_MIRROR item; the worker
 * forwards @mirror to the panel controller's set_mirror hook.
 */
3204 static void dsi_push_set_mirror(struct omap_display *display, int mirror)
3206 struct dsi_cmd_item p;
3208 p.display = display;
3209 p.cmd = DSI_CMD_SET_MIRROR;
3210 p.u.mirror = mirror;
3212 DSSDBG("pushing SET_MIRROR\n");
/*
 * dsi_wait_sync() - push a SYNC marker into the command fifo and block
 * until the worker has drained everything queued before it, with a
 * 60-second timeout. NOTE(review): the return-value lines are elided —
 * presumably 0 on success and an error on timeout; confirm.
 */
3217 static int dsi_wait_sync(struct omap_display *display)
3219 long wait = msecs_to_jiffies(60000);
3220 struct completion compl;
3224 init_completion(&compl);
3225 dsi_push_sync(display, &compl);
3227 DSSDBG("Waiting for SYNC to happen...\n");
3228 wait = wait_for_completion_timeout(&compl, wait);
3229 DSSDBG("Released from SYNC\n");
3232 DSSERR("timeout waiting sync\n");
/*
 * dsi_display_init_dispc() - configure the DISPC side for DSI command
 * mode: register the FRAMEDONE ISR, select TFT LCD type, DSI parallel
 * interface mode, fifo handcheck, and data-line width from the panel
 * controller's pixel size. NOTE(review): the timings struct initializer
 * is elided in this extraction.
 */
3252 static int dsi_display_init_dispc(struct omap_display *display)
3256 r = omap_dispc_register_isr(framedone_callback, NULL,
3257 DISPC_IRQ_FRAMEDONE);
3259 DSSERR("can't get FRAMEDONE irq\n");
3263 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
3265 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
3266 dispc_enable_fifohandcheck(1);
3268 dispc_set_tft_data_lines(display->ctrl->pixel_size);
3271 struct omap_video_timings timings = {
3280 dispc_set_lcd_timings(&timings);
/*
 * dsi_display_uninit_dispc() - undo dsi_display_init_dispc(): unregister
 * the FRAMEDONE ISR.
 */
3286 static void dsi_display_uninit_dispc(struct omap_display *display)
3288 omap_dispc_unregister_isr(framedone_callback, NULL,
3289 DISPC_IRQ_FRAMEDONE);
/*
 * dsi_display_init_dsi() - bring up the DSI link: program the DSI PLL
 * for the board-configured DDR clock, initialize the complex I/O block,
 * set protocol/LP-clock timings, configure the protocol engine, enable
 * VC0/VC1, then call the controller and panel enable hooks, and finally
 * switch VC0 to high-speed. NOTE(review): error-path labels/returns are
 * elided in this extraction; the visible tail (ctrl->disable,
 * dsi_complexio_uninit) is presumably the unwind path — confirm.
 */
3292 static int dsi_display_init_dsi(struct omap_display *display)
3294 struct dsi_clock_info cinfo;
3297 _dsi_print_reset_status();
3299 r = dsi_pll_init(1, 0);
3303 r = dsi_pll_calc_ddrfreq(display->hw_config.u.dsi.ddr_clk_hz, &cinfo);
3307 r = dsi_pll_program(&cinfo);
3313 r = dsi_complexio_init(display);
3317 _dsi_print_reset_status();
3319 dsi_proto_timings();
3320 dsi_set_lp_clk_divisor();
3323 _dsi_print_reset_status();
3325 r = dsi_proto_config(display);
3329 /* enable interface */
3330 dsi_vc_enable(0, 1);
3331 dsi_vc_enable(1, 1);
3333 dsi_force_tx_stop_mode_io();
3335 if (display->ctrl && display->ctrl->enable) {
3336 r = display->ctrl->enable(display);
3341 if (display->panel && display->panel->enable) {
3342 r = display->panel->enable(display);
/* Initial configuration must happen in LP mode; only then go HS. */
3347 /* enable high-speed after initial config */
3348 dsi_vc_enable_hs(0, 1);
3352 if (display->ctrl && display->ctrl->disable)
3353 display->ctrl->disable(display);
3357 dsi_complexio_uninit();
/*
 * dsi_display_uninit_dsi() - tear down the DSI link in reverse order of
 * init: panel disable, controller disable, then complex I/O uninit.
 */
3364 static void dsi_display_uninit_dsi(struct omap_display *display)
3366 if (display->panel && display->panel->disable)
3367 display->panel->disable(display);
3368 if (display->ctrl && display->ctrl->disable)
3369 display->ctrl->disable(display);
3371 dsi_complexio_uninit();
/*
 * dsi_core_init() - basic DSI module setup via DSI_SYSCONFIG (autoidle,
 * wakeup enable, smart-idle) and IRQ initialization.
 * NOTE(review): the field comments for bits 0 and 2 are elided in this
 * extraction — bit meanings inferred from the SYSCONFIG register layout;
 * confirm against the OMAP3 TRM.
 */
3375 static int dsi_core_init(void)
3378 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
3381 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
3383 /* SIDLEMODE smart-idle */
3384 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
3386 _dsi_initialize_irq();
/*
 * dsi_display_enable() - full power-up path for the display: under
 * dsi.lock, enable clocks, init DISPC and the DSI link, mark the display
 * ACTIVE, queue TE enable and the user-selected update mode, then wait
 * for the command fifo to drain (dsi_wait_sync). Returns 0 on success.
 * NOTE(review): error labels and several clock-enable lines are elided
 * in this extraction; the visible tail is the unwind path.
 */
3391 static int dsi_display_enable(struct omap_display *display)
3395 DSSDBG("dsi_display_enable\n");
3397 mutex_lock(&dsi.lock);
3399 if (display->state != OMAP_DSS_DISPLAY_DISABLED) {
3400 DSSERR("display already enabled\n");
3406 dsi_enable_pll_clock(1);
3414 r = dsi_display_init_dispc(display);
3418 r = dsi_display_init_dsi(display);
3422 display->state = OMAP_DSS_DISPLAY_ACTIVE;
3425 dsi_push_set_te(display, 1);
3427 dsi_push_set_update_mode(display, dsi.user_update_mode);
3428 dsi.target_update_mode = dsi.user_update_mode;
3430 mutex_unlock(&dsi.lock);
/* Wait for the queued TE/update-mode commands to actually take effect. */
3432 return dsi_wait_sync(display);
3435 dsi_display_uninit_dispc(display);
3438 dsi_enable_pll_clock(0);
3440 mutex_unlock(&dsi.lock);
3441 DSSDBG("dsi_display_enable FAILED\n");
/*
 * dsi_display_disable() - power-down path: under dsi.lock, stop updates
 * (queue UPDATE_DISABLED and drain the fifo), mark the display DISABLED,
 * tear down DISPC and the DSI link, and gate the PLL clock. A display
 * already DISABLED or SUSPENDED is left untouched.
 */
3445 static void dsi_display_disable(struct omap_display *display)
3447 DSSDBG("dsi_display_disable\n");
3449 mutex_lock(&dsi.lock);
3451 if (display->state == OMAP_DSS_DISPLAY_DISABLED ||
3452 display->state == OMAP_DSS_DISPLAY_SUSPENDED)
3455 if (dsi.target_update_mode != OMAP_DSS_UPDATE_DISABLED) {
3456 dsi_push_set_update_mode(display, OMAP_DSS_UPDATE_DISABLED);
3457 dsi.target_update_mode = OMAP_DSS_UPDATE_DISABLED;
/* Ensure no update is in flight before tearing the hardware down. */
3460 dsi_wait_sync(display);
3462 display->state = OMAP_DSS_DISPLAY_DISABLED;
3464 dsi_display_uninit_dispc(display);
3466 dsi_display_uninit_dsi(display);
3469 dsi_enable_pll_clock(0);
3471 mutex_unlock(&dsi.lock);
/*
 * dsi_display_suspend() - suspend is implemented as a full disable,
 * then the state is overridden to SUSPENDED so resume can tell the
 * difference from a user-initiated disable.
 */
3474 static int dsi_display_suspend(struct omap_display *display)
3476 DSSDBG("dsi_display_suspend\n");
3478 dsi_display_disable(display);
3480 display->state = OMAP_DSS_DISPLAY_SUSPENDED;
/*
 * dsi_display_resume() - resume by faking the DISABLED state (enable
 * refuses to run on anything else) and re-running the full enable path.
 */
3485 static int dsi_display_resume(struct omap_display *display)
3487 DSSDBG("dsi_display_resume\n");
3489 display->state = OMAP_DSS_DISPLAY_DISABLED;
3490 return dsi_display_enable(display);
/*
 * dsi_display_update() - request a manual update of the rectangle
 * (x,y,w,h). Empty rectangles are a no-op; the push only happens in
 * MANUAL update mode (other modes are silently ignored — see the XXX).
 * Returns 0 (the elided tail presumably returns 0 — confirm).
 */
3493 static int dsi_display_update(struct omap_display *display,
3494 u16 x, u16 y, u16 w, u16 h)
3496 DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
3498 if (w == 0 || h == 0)
3501 mutex_lock(&dsi.lock);
3503 if (dsi.target_update_mode == OMAP_DSS_UPDATE_MANUAL)
3504 dsi_push_update(display, x, y, w, h);
3505 /* XXX else return error? */
3507 mutex_unlock(&dsi.lock);
/*
 * dsi_display_sync() - public sync entry point: drain the command fifo.
 */
3512 static int dsi_display_sync(struct omap_display *display)
3515 return dsi_wait_sync(display);
/*
 * dsi_display_set_update_mode() - change the update mode. Both the
 * target mode (what the worker should converge to) and the user mode
 * (restored on re-enable) are recorded; the actual switch is queued and
 * the call blocks until the worker has processed it.
 */
3518 static int dsi_display_set_update_mode(struct omap_display *display,
3519 enum omap_dss_update_mode mode)
3521 DSSDBGF("%d", mode);
3523 mutex_lock(&dsi.lock);
3525 if (dsi.target_update_mode != mode) {
3526 dsi_push_set_update_mode(display, mode);
3528 dsi.target_update_mode = mode;
3529 dsi.user_update_mode = mode;
3532 mutex_unlock(&dsi.lock);
3534 return dsi_wait_sync(display);
/*
 * dsi_display_get_update_mode() - report the mode the worker is
 * currently in (dsi.update_mode), which may briefly differ from the
 * requested target_update_mode while a switch is queued.
 */
3537 static enum omap_dss_update_mode dsi_display_get_update_mode(
3538 struct omap_display *display)
3540 return dsi.update_mode;
/*
 * dsi_display_enable_te() - enable/disable tearing-effect sync. Bails
 * out if the panel controller has no enable_te hook (elided line
 * presumably returns an error — confirm); otherwise queues the change
 * and waits for the fifo to drain.
 */
3543 static int dsi_display_enable_te(struct omap_display *display, bool enable)
3545 DSSDBGF("%d", enable);
3547 if (!display->ctrl->enable_te)
3550 dsi_push_set_te(display, enable);
3552 return dsi_wait_sync(display);
/*
 * dsi_display_get_te() - query TE state. NOTE(review): the entire body
 * is elided in this extraction; only the signature is visible.
 */
3555 static int dsi_display_get_te(struct omap_display *display)
/*
 * dsi_display_set_rotate() - queue a rotation change; requires the panel
 * controller to provide both set_rotate and get_rotate hooks (the elided
 * line presumably returns an error otherwise — confirm).
 */
3562 static int dsi_display_set_rotate(struct omap_display *display, u8 rotate)
3564 DSSDBGF("%d", rotate);
3566 if (!display->ctrl->set_rotate || !display->ctrl->get_rotate)
3569 dsi_push_set_rotate(display, rotate);
3571 return dsi_wait_sync(display);
/*
 * dsi_display_get_rotate() - read the current rotation straight from the
 * panel controller (no fifo round-trip needed for a read).
 */
3574 static u8 dsi_display_get_rotate(struct omap_display *display)
3576 if (!display->ctrl->set_rotate || !display->ctrl->get_rotate)
3579 return display->ctrl->get_rotate(display);
/*
 * dsi_display_set_mirror() - queue a mirror change; requires the panel
 * controller to provide both set_mirror and get_mirror hooks.
 */
3582 static int dsi_display_set_mirror(struct omap_display *display, bool mirror)
3584 DSSDBGF("%d", mirror);
3586 if (!display->ctrl->set_mirror || !display->ctrl->get_mirror)
3589 dsi_push_set_mirror(display, mirror);
3591 return dsi_wait_sync(display);
/*
 * dsi_display_get_mirror() - read the current mirror state straight from
 * the panel controller.
 */
3594 static bool dsi_display_get_mirror(struct omap_display *display)
3596 if (!display->ctrl->set_mirror || !display->ctrl->get_mirror)
3599 return display->ctrl->get_mirror(display);
/*
 * dsi_display_run_test() - run self-test @test_num via the command
 * worker: queue a TEST item with a caller-local result/completion pair
 * and wait up to 60 s for the worker to finish it. Only valid on an
 * ACTIVE display. NOTE(review): the return statements are elided —
 * presumably the test result on success and -ETIMEDOUT/-EIO on timeout;
 * confirm against the original file.
 */
3602 static int dsi_display_run_test(struct omap_display *display, int test_num)
3604 long wait = msecs_to_jiffies(60000);
3605 struct completion compl;
3608 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
3611 DSSDBGF("%d", test_num);
3613 init_completion(&compl);
3615 dsi_push_test(display, test_num, &result, &compl);
3617 DSSDBG("Waiting for SYNC to happen...\n");
3618 wait = wait_for_completion_timeout(&compl, wait);
3619 DSSDBG("Released from SYNC\n");
3622 DSSERR("timeout waiting test sync\n");
/*
 * dsi_display_memory_read() - read back up to @size bytes of the panel
 * framebuffer region (x,y,w,h) into @buf via the command worker.
 * Requires the controller's memory_read hook and an ACTIVE display;
 * waits up to 60 s for the worker. NOTE(review): the mem_read.buf/x/y/
 * w/h assignments and the return statements are elided in this
 * extraction — presumably returns ret_size on success; confirm.
 */
3629 static int dsi_display_memory_read(struct omap_display *display,
3630 void *buf, size_t size,
3631 u16 x, u16 y, u16 w, u16 h)
3633 long wait = msecs_to_jiffies(60000);
3634 struct completion compl;
3635 struct dsi_cmd_mem_read mem_read;
3640 if (!display->ctrl->memory_read)
3643 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
3646 init_completion(&compl);
/* ret_size is filled in by the worker; completion signals it is done. */
3653 mem_read.size = size;
3654 mem_read.ret_size = &ret_size;
3655 mem_read.completion = &compl;
3657 dsi_push_mem_read(display, &mem_read);
3659 DSSDBG("Waiting for SYNC to happen...\n");
3660 wait = wait_for_completion_timeout(&compl, wait);
3661 DSSDBG("Released from SYNC\n");
3664 DSSERR("timeout waiting mem read sync\n");
/*
 * dsi_configure_overlay() - set DISPC burst size and fifo low/high
 * thresholds for the overlay's plane. NOTE(review): the lines computing
 * `size` and `low` are elided in this extraction; `high` is the plane
 * fifo size minus one burst.
 */
3671 static void dsi_configure_overlay(struct omap_overlay *ovl)
3673 unsigned low, high, size;
3674 enum omap_burst_size burst;
3675 enum omap_plane plane = ovl->id;
3677 burst = OMAP_DSS_BURST_16x32;
3680 dispc_set_burst_size(plane, burst);
3682 high = dispc_get_plane_fifo_size(plane) - size;
3684 dispc_setup_plane_fifo(plane, low, high);
/*
 * dsi_init_display() - wire a DSI display up to this driver: install all
 * omap_display operation callbacks, advertise manual-update capability,
 * and attach the display to both virtual channels.
 */
3687 void dsi_init_display(struct omap_display *display)
3689 DSSDBG("DSI init\n");
3691 display->enable = dsi_display_enable;
3692 display->disable = dsi_display_disable;
3693 display->suspend = dsi_display_suspend;
3694 display->resume = dsi_display_resume;
3695 display->update = dsi_display_update;
3696 display->sync = dsi_display_sync;
3697 display->set_update_mode = dsi_display_set_update_mode;
3698 display->get_update_mode = dsi_display_get_update_mode;
3699 display->enable_te = dsi_display_enable_te;
3700 display->get_te = dsi_display_get_te;
3702 display->get_rotate = dsi_display_get_rotate;
3703 display->set_rotate = dsi_display_set_rotate;
3705 display->get_mirror = dsi_display_get_mirror;
3706 display->set_mirror = dsi_display_set_mirror;
3708 display->run_test = dsi_display_run_test;
3709 display->memory_read = dsi_display_memory_read;
3711 display->configure_overlay = dsi_configure_overlay;
3713 display->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
/* Both DSI virtual channels serve this one display. */
3715 dsi.vc[0].display = display;
3716 dsi.vc[1].display = display;
/*
 * NOTE(review): this is the interior of the module init function
 * (presumably dsi_init — its signature line is elided in this
 * extraction). It sets up the command fifo and its synchronization
 * primitives, the single-threaded workqueue with its two work items,
 * the state mutex and default update modes, maps the DSI register
 * block, and prints the hardware revision.
 */
3723 spin_lock_init(&dsi.cmd_lock);
3724 dsi.cmd_fifo = kfifo_alloc(
3725 DSI_CMD_FIFO_LEN * sizeof(struct dsi_cmd_item),
3729 init_completion(&dsi.cmd_done);
3730 atomic_set(&dsi.cmd_fifo_full, 0);
3731 atomic_set(&dsi.cmd_pending, 0);
3733 init_completion(&dsi.bta_completion);
/* Single-threaded: fifo processing and framedone work are serialized. */
3735 dsi.workqueue = create_singlethread_workqueue("dsi");
3736 INIT_WORK(&dsi.framedone_work, framedone_worker);
3737 INIT_WORK(&dsi.process_work, dsi_process_cmd_fifo);
3739 mutex_init(&dsi.lock);
3741 dsi.target_update_mode = OMAP_DSS_UPDATE_DISABLED;
3742 dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
3744 dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
3746 DSSERR("can't ioremap DSI\n");
3752 rev = dsi_read_reg(DSI_REVISION);
3753 printk(KERN_INFO "OMAP DSI rev %d.%d\n",
3754 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3763 flush_workqueue(dsi.workqueue);
3764 destroy_workqueue(dsi.workqueue);
3768 kfifo_free(dsi.cmd_fifo);
3770 DSSDBG("omap_dsi_exit\n");