2 * linux/drivers/video/omap2/dss/dsi.c
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #define DSS_SUBSYS_NAME "DSI"
22 #include <linux/kernel.h>
24 #include <linux/clk.h>
25 #include <linux/device.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/mutex.h>
30 #include <linux/semaphore.h>
31 #include <linux/seq_file.h>
32 #include <linux/platform_device.h>
33 #include <linux/regulator/consumer.h>
34 #include <linux/wait.h>
35 #include <linux/workqueue.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/debugfs.h>
40 #include <video/omapdss.h>
41 #include <plat/clock.h>
44 #include "dss_features.h"
46 /*#define VERBOSE_IRQ*/
47 #define DSI_CATCH_MISSING_TE
49 struct dsi_reg { u16 idx; };
51 #define DSI_REG(idx) ((const struct dsi_reg) { idx })
53 #define DSI_SZ_REGS SZ_1K
54 /* DSI Protocol Engine */
56 #define DSI_REVISION DSI_REG(0x0000)
57 #define DSI_SYSCONFIG DSI_REG(0x0010)
58 #define DSI_SYSSTATUS DSI_REG(0x0014)
59 #define DSI_IRQSTATUS DSI_REG(0x0018)
60 #define DSI_IRQENABLE DSI_REG(0x001C)
61 #define DSI_CTRL DSI_REG(0x0040)
62 #define DSI_GNQ DSI_REG(0x0044)
63 #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
64 #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
65 #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
66 #define DSI_CLK_CTRL DSI_REG(0x0054)
67 #define DSI_TIMING1 DSI_REG(0x0058)
68 #define DSI_TIMING2 DSI_REG(0x005C)
69 #define DSI_VM_TIMING1 DSI_REG(0x0060)
70 #define DSI_VM_TIMING2 DSI_REG(0x0064)
71 #define DSI_VM_TIMING3 DSI_REG(0x0068)
72 #define DSI_CLK_TIMING DSI_REG(0x006C)
73 #define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
74 #define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
75 #define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
76 #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
77 #define DSI_VM_TIMING4 DSI_REG(0x0080)
78 #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
79 #define DSI_VM_TIMING5 DSI_REG(0x0088)
80 #define DSI_VM_TIMING6 DSI_REG(0x008C)
81 #define DSI_VM_TIMING7 DSI_REG(0x0090)
82 #define DSI_STOPCLK_TIMING DSI_REG(0x0094)
83 #define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
84 #define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
85 #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
86 #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
87 #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
88 #define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
89 #define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
93 #define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
94 #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
95 #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
96 #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
97 #define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
99 /* DSI_PLL_CTRL_SCP */
101 #define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
102 #define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
103 #define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
104 #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
105 #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
107 #define REG_GET(dsidev, idx, start, end) \
108 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
110 #define REG_FLD_MOD(dsidev, idx, val, start, end) \
111 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
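/*
 * Usage sketch (illustrative, mirroring calls made later in this file):
 * REG_GET and REG_FLD_MOD wrap the generic FLD_GET/FLD_MOD bit-field helpers
 * with a register read (and write-back). Setting bit 0 (IF_EN) of DSI_CTRL
 * can be written out explicitly:
 *
 *	u32 v = dsi_read_reg(dsidev, DSI_CTRL);
 *	v = FLD_MOD(v, 1, 0, 0);	(set bits 0..0 to 1)
 *	dsi_write_reg(dsidev, DSI_CTRL, v);
 *
 * or with the equivalent one-liner
 *
 *	REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
 */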
113 /* Global interrupts */
114 #define DSI_IRQ_VC0 (1 << 0)
115 #define DSI_IRQ_VC1 (1 << 1)
116 #define DSI_IRQ_VC2 (1 << 2)
117 #define DSI_IRQ_VC3 (1 << 3)
118 #define DSI_IRQ_WAKEUP (1 << 4)
119 #define DSI_IRQ_RESYNC (1 << 5)
120 #define DSI_IRQ_PLL_LOCK (1 << 7)
121 #define DSI_IRQ_PLL_UNLOCK (1 << 8)
122 #define DSI_IRQ_PLL_RECALL (1 << 9)
123 #define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
124 #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
125 #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
126 #define DSI_IRQ_TE_TRIGGER (1 << 16)
127 #define DSI_IRQ_ACK_TRIGGER (1 << 17)
128 #define DSI_IRQ_SYNC_LOST (1 << 18)
129 #define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
130 #define DSI_IRQ_TA_TIMEOUT (1 << 20)
131 #define DSI_IRQ_ERROR_MASK \
132 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
133 DSI_IRQ_TA_TIMEOUT)
134 #define DSI_IRQ_CHANNEL_MASK 0xf
136 /* Virtual channel interrupts */
137 #define DSI_VC_IRQ_CS (1 << 0)
138 #define DSI_VC_IRQ_ECC_CORR (1 << 1)
139 #define DSI_VC_IRQ_PACKET_SENT (1 << 2)
140 #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
141 #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
142 #define DSI_VC_IRQ_BTA (1 << 5)
143 #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
144 #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
145 #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
146 #define DSI_VC_IRQ_ERROR_MASK \
147 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
148 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
149 DSI_VC_IRQ_FIFO_TX_UDF)
151 /* ComplexIO interrupts */
152 #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
153 #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
154 #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
155 #define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
156 #define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
157 #define DSI_CIO_IRQ_ERRESC1 (1 << 5)
158 #define DSI_CIO_IRQ_ERRESC2 (1 << 6)
159 #define DSI_CIO_IRQ_ERRESC3 (1 << 7)
160 #define DSI_CIO_IRQ_ERRESC4 (1 << 8)
161 #define DSI_CIO_IRQ_ERRESC5 (1 << 9)
162 #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
163 #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
164 #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
165 #define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
166 #define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
167 #define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
168 #define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
169 #define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
170 #define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
171 #define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
172 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
173 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
174 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
175 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
176 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
177 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
178 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
179 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
180 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
181 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
182 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
183 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
184 #define DSI_CIO_IRQ_ERROR_MASK \
185 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
186 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
187 DSI_CIO_IRQ_ERRSYNCESC5 | \
188 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
189 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
190 DSI_CIO_IRQ_ERRESC5 | \
191 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
192 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
193 DSI_CIO_IRQ_ERRCONTROL5 | \
194 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
195 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
196 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
197 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
198 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
200 #define DSI_DT_DCS_SHORT_WRITE_0 0x05
201 #define DSI_DT_DCS_SHORT_WRITE_1 0x15
202 #define DSI_DT_DCS_READ 0x06
203 #define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
204 #define DSI_DT_NULL_PACKET 0x09
205 #define DSI_DT_DCS_LONG_WRITE 0x39
207 #define DSI_DT_RX_ACK_WITH_ERR 0x02
208 #define DSI_DT_RX_DCS_LONG_READ 0x1c
209 #define DSI_DT_RX_SHORT_READ_1 0x21
210 #define DSI_DT_RX_SHORT_READ_2 0x22
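/*
 * Background on the values above (standard MIPI DSI packet layout, not
 * driver-specific): the data type forms bits 5..0 of the packet's data
 * identifier byte and the virtual channel number forms bits 7..6. A short
 * packet is four bytes on the wire: data identifier, two data bytes and an
 * ECC byte. So a DCS short write with one parameter (0x15) sent on VC 1
 * carries the data identifier 0x55.
 */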
212 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
214 #define DSI_MAX_NR_ISRS 2
216 struct dsi_isr_data {
224 DSI_FIFO_SIZE_32 = 1,
225 DSI_FIFO_SIZE_64 = 2,
226 DSI_FIFO_SIZE_96 = 3,
227 DSI_FIFO_SIZE_128 = 4,
238 DSI_DATA1_P = 1 << 2,
239 DSI_DATA1_N = 1 << 3,
240 DSI_DATA2_P = 1 << 4,
241 DSI_DATA2_N = 1 << 5,
242 DSI_DATA3_P = 1 << 6,
243 DSI_DATA3_N = 1 << 7,
244 DSI_DATA4_P = 1 << 8,
245 DSI_DATA4_N = 1 << 9,
248 struct dsi_update_region {
250 struct omap_dss_device *device;
253 struct dsi_irq_stats {
254 unsigned long last_reset;
256 unsigned dsi_irqs[32];
257 unsigned vc_irqs[4][32];
258 unsigned cio_irqs[32];
261 struct dsi_isr_tables {
262 struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
263 struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
264 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
268 struct platform_device *pdev;
272 void (*dsi_mux_pads)(bool enable);
274 struct dsi_clock_info current_cinfo;
276 bool vdds_dsi_enabled;
277 struct regulator *vdds_dsi_reg;
280 enum dsi_vc_mode mode;
281 struct omap_dss_device *dssdev;
282 enum fifo_size fifo_size;
287 struct semaphore bus_lock;
292 struct dsi_isr_tables isr_tables;
293 /* space for a copy used by the interrupt handler */
294 struct dsi_isr_tables isr_tables_copy;
297 struct dsi_update_region update_region;
302 void (*framedone_callback)(int, void *);
303 void *framedone_data;
305 struct delayed_work framedone_timeout_work;
307 #ifdef DSI_CATCH_MISSING_TE
308 struct timer_list te_timer;
311 unsigned long cache_req_pck;
312 unsigned long cache_clk_freq;
313 struct dsi_clock_info cache_cinfo;
316 spinlock_t errors_lock;
318 ktime_t perf_setup_time;
319 ktime_t perf_start_time;
324 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
325 spinlock_t irq_stats_lock;
326 struct dsi_irq_stats irq_stats;
328 /* DSI PLL Parameter Ranges */
329 unsigned long regm_max, regn_max;
330 unsigned long regm_dispc_max, regm_dsi_max;
331 unsigned long fint_min, fint_max;
332 unsigned long lpdiv_max;
336 unsigned scp_clk_refcount;
339 struct dsi_packet_sent_handler_data {
340 struct platform_device *dsidev;
341 struct completion *completion;
344 static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
347 static unsigned int dsi_perf;
348 module_param_named(dsi_perf, dsi_perf, bool, 0644);
351 static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
353 return dev_get_drvdata(&dsidev->dev);
356 static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
358 return dsi_pdev_map[dssdev->phy.dsi.module];
361 struct platform_device *dsi_get_dsidev_from_id(int module)
363 return dsi_pdev_map[module];
366 static int dsi_get_dsidev_id(struct platform_device *dsidev)
368 /* TEMP: Pass 0 as the dsi module index until the dsi platform
369 * device names are changed to the form "omapdss_dsi.0",
370 * "omapdss_dsi.1" and so on */
371 BUG_ON(dsidev->id != -1);
376 static inline void dsi_write_reg(struct platform_device *dsidev,
377 const struct dsi_reg idx, u32 val)
379 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
381 __raw_writel(val, dsi->base + idx.idx);
384 static inline u32 dsi_read_reg(struct platform_device *dsidev,
385 const struct dsi_reg idx)
387 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
389 return __raw_readl(dsi->base + idx.idx);
393 void dsi_save_context(void)
397 void dsi_restore_context(void)
401 void dsi_bus_lock(struct omap_dss_device *dssdev)
403 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
404 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
406 down(&dsi->bus_lock);
408 EXPORT_SYMBOL(dsi_bus_lock);
410 void dsi_bus_unlock(struct omap_dss_device *dssdev)
412 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
413 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
417 EXPORT_SYMBOL(dsi_bus_unlock);
419 static bool dsi_bus_is_locked(struct platform_device *dsidev)
421 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
423 return dsi->bus_lock.count == 0;
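/*
 * Typical caller pattern (illustrative, assuming a DSI panel driver):
 *
 *	dsi_bus_lock(dssdev);
 *	... send DCS commands / start an update ...
 *	dsi_bus_unlock(dssdev);
 *
 * dsi_bus_is_locked() only inspects the semaphore count; it is meant for
 * sanity checks, not for acquiring the lock.
 */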
426 static void dsi_completion_handler(void *data, u32 mask)
428 complete((struct completion *)data);
431 static inline int wait_for_bit_change(struct platform_device *dsidev,
432 const struct dsi_reg idx, int bitnum, int value)
436 while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
445 static void dsi_perf_mark_setup(struct platform_device *dsidev)
447 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
448 dsi->perf_setup_time = ktime_get();
451 static void dsi_perf_mark_start(struct platform_device *dsidev)
453 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
454 dsi->perf_start_time = ktime_get();
457 static void dsi_perf_show(struct platform_device *dsidev, const char *name)
459 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
460 ktime_t t, setup_time, trans_time;
462 u32 setup_us, trans_us, total_us;
469 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
470 setup_us = (u32)ktime_to_us(setup_time);
474 trans_time = ktime_sub(t, dsi->perf_start_time);
475 trans_us = (u32)ktime_to_us(trans_time);
479 total_us = setup_us + trans_us;
481 total_bytes = dsi->update_region.w *
482 dsi->update_region.h *
483 dsi->update_region.device->ctrl.pixel_size / 8;
485 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
486 "%u bytes, %u kbytes/sec\n",
491 1000*1000 / total_us,
493 total_bytes * 1000 / total_us);
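/*
 * Worked example with assumed numbers: an 864x480 update at 24 bpp gives
 * total_bytes = 864 * 480 * 24 / 8 = 1244160. With setup_us = 300 and
 * trans_us = 14700 (total_us = 15000) the line above prints roughly 66 Hz
 * (1000*1000 / 15000) and 82944 kbytes/sec (1244160 * 1000 / 15000).
 */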
496 static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
500 static inline void dsi_perf_mark_start(struct platform_device *dsidev)
504 static inline void dsi_perf_show(struct platform_device *dsidev,
510 static void print_irq_status(u32 status)
516 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
519 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
522 if (status & DSI_IRQ_##x) \
548 static void print_irq_status_vc(int channel, u32 status)
554 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
557 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
560 if (status & DSI_VC_IRQ_##x) \
577 static void print_irq_status_cio(u32 status)
582 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
585 if (status & DSI_CIO_IRQ_##x) \
599 PIS(ERRCONTENTIONLP0_1);
600 PIS(ERRCONTENTIONLP1_1);
601 PIS(ERRCONTENTIONLP0_2);
602 PIS(ERRCONTENTIONLP1_2);
603 PIS(ERRCONTENTIONLP0_3);
604 PIS(ERRCONTENTIONLP1_3);
605 PIS(ULPSACTIVENOT_ALL0);
606 PIS(ULPSACTIVENOT_ALL1);
612 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
613 static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
614 u32 *vcstatus, u32 ciostatus)
616 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
619 spin_lock(&dsi->irq_stats_lock);
621 dsi->irq_stats.irq_count++;
622 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
624 for (i = 0; i < 4; ++i)
625 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
627 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
629 spin_unlock(&dsi->irq_stats_lock);
632 #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
635 static int debug_irq;
637 static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
638 u32 *vcstatus, u32 ciostatus)
640 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
643 if (irqstatus & DSI_IRQ_ERROR_MASK) {
644 DSSERR("DSI error, irqstatus %x\n", irqstatus);
645 print_irq_status(irqstatus);
646 spin_lock(&dsi->errors_lock);
647 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
648 spin_unlock(&dsi->errors_lock);
649 } else if (debug_irq) {
650 print_irq_status(irqstatus);
653 for (i = 0; i < 4; ++i) {
654 if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
655 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
657 print_irq_status_vc(i, vcstatus[i]);
658 } else if (debug_irq) {
659 print_irq_status_vc(i, vcstatus[i]);
663 if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
664 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
665 print_irq_status_cio(ciostatus);
666 } else if (debug_irq) {
667 print_irq_status_cio(ciostatus);
671 static void dsi_call_isrs(struct dsi_isr_data *isr_array,
672 unsigned isr_array_size, u32 irqstatus)
674 struct dsi_isr_data *isr_data;
677 for (i = 0; i < isr_array_size; i++) {
678 isr_data = &isr_array[i];
679 if (isr_data->isr && isr_data->mask & irqstatus)
680 isr_data->isr(isr_data->arg, irqstatus);
684 static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
685 u32 irqstatus, u32 *vcstatus, u32 ciostatus)
689 dsi_call_isrs(isr_tables->isr_table,
690 ARRAY_SIZE(isr_tables->isr_table),
693 for (i = 0; i < 4; ++i) {
694 if (vcstatus[i] == 0)
696 dsi_call_isrs(isr_tables->isr_table_vc[i],
697 ARRAY_SIZE(isr_tables->isr_table_vc[i]),
702 dsi_call_isrs(isr_tables->isr_table_cio,
703 ARRAY_SIZE(isr_tables->isr_table_cio),
707 static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
709 struct platform_device *dsidev;
710 struct dsi_data *dsi;
711 u32 irqstatus, vcstatus[4], ciostatus;
714 dsidev = (struct platform_device *) arg;
715 dsi = dsi_get_dsidrv_data(dsidev);
717 spin_lock(&dsi->irq_lock);
719 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
721 /* IRQ is not for us */
723 spin_unlock(&dsi->irq_lock);
727 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
728 /* flush posted write */
729 dsi_read_reg(dsidev, DSI_IRQSTATUS);
731 for (i = 0; i < 4; ++i) {
732 if ((irqstatus & (1 << i)) == 0) {
737 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
739 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
740 /* flush posted write */
741 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
744 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
745 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
747 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
748 /* flush posted write */
749 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
754 #ifdef DSI_CATCH_MISSING_TE
755 if (irqstatus & DSI_IRQ_TE_TRIGGER)
756 del_timer(&dsi->te_timer);
759 /* make a copy and unlock, so that isrs can unregister
760 * themselves */
761 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
762 sizeof(dsi->isr_tables));
764 spin_unlock(&dsi->irq_lock);
766 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
768 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
770 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
775 /* dsi->irq_lock has to be locked by the caller */
776 static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
777 struct dsi_isr_data *isr_array,
778 unsigned isr_array_size, u32 default_mask,
779 const struct dsi_reg enable_reg,
780 const struct dsi_reg status_reg)
782 struct dsi_isr_data *isr_data;
789 for (i = 0; i < isr_array_size; i++) {
790 isr_data = &isr_array[i];
792 if (isr_data->isr == NULL)
795 mask |= isr_data->mask;
798 old_mask = dsi_read_reg(dsidev, enable_reg);
799 /* clear the irqstatus for newly enabled irqs */
800 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
801 dsi_write_reg(dsidev, enable_reg, mask);
803 /* flush posted writes */
804 dsi_read_reg(dsidev, enable_reg);
805 dsi_read_reg(dsidev, status_reg);
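/*
 * Note on "(mask ^ old_mask) & mask" above: it picks out only the interrupts
 * that are about to become enabled, so their stale status bits are cleared
 * without touching interrupts that were already enabled. For example, with
 * old_mask = 0x06 and mask = 0x0c the expression yields 0x08: bit 3 is newly
 * enabled and gets its status cleared, bit 2 stays enabled and is left alone.
 */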
808 /* dsi->irq_lock has to be locked by the caller */
809 static void _omap_dsi_set_irqs(struct platform_device *dsidev)
811 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
812 u32 mask = DSI_IRQ_ERROR_MASK;
813 #ifdef DSI_CATCH_MISSING_TE
814 mask |= DSI_IRQ_TE_TRIGGER;
816 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
817 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
818 DSI_IRQENABLE, DSI_IRQSTATUS);
821 /* dsi->irq_lock has to be locked by the caller */
822 static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
824 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
826 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
827 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
828 DSI_VC_IRQ_ERROR_MASK,
829 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
832 /* dsi->irq_lock has to be locked by the caller */
833 static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
835 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
837 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
838 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
839 DSI_CIO_IRQ_ERROR_MASK,
840 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
843 static void _dsi_initialize_irq(struct platform_device *dsidev)
845 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
849 spin_lock_irqsave(&dsi->irq_lock, flags);
851 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
853 _omap_dsi_set_irqs(dsidev);
854 for (vc = 0; vc < 4; ++vc)
855 _omap_dsi_set_irqs_vc(dsidev, vc);
856 _omap_dsi_set_irqs_cio(dsidev);
858 spin_unlock_irqrestore(&dsi->irq_lock, flags);
861 static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
862 struct dsi_isr_data *isr_array, unsigned isr_array_size)
864 struct dsi_isr_data *isr_data;
870 /* check for duplicate entry and find a free slot */
872 for (i = 0; i < isr_array_size; i++) {
873 isr_data = &isr_array[i];
875 if (isr_data->isr == isr && isr_data->arg == arg &&
876 isr_data->mask == mask) {
880 if (isr_data->isr == NULL && free_idx == -1)
887 isr_data = &isr_array[free_idx];
890 isr_data->mask = mask;
895 static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
896 struct dsi_isr_data *isr_array, unsigned isr_array_size)
898 struct dsi_isr_data *isr_data;
901 for (i = 0; i < isr_array_size; i++) {
902 isr_data = &isr_array[i];
903 if (isr_data->isr != isr || isr_data->arg != arg ||
904 isr_data->mask != mask)
907 isr_data->isr = NULL;
908 isr_data->arg = NULL;
917 static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
920 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
924 spin_lock_irqsave(&dsi->irq_lock, flags);
926 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
927 ARRAY_SIZE(dsi->isr_tables.isr_table));
930 _omap_dsi_set_irqs(dsidev);
932 spin_unlock_irqrestore(&dsi->irq_lock, flags);
937 static int dsi_unregister_isr(struct platform_device *dsidev,
938 omap_dsi_isr_t isr, void *arg, u32 mask)
940 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
944 spin_lock_irqsave(&dsi->irq_lock, flags);
946 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
947 ARRAY_SIZE(dsi->isr_tables.isr_table));
950 _omap_dsi_set_irqs(dsidev);
952 spin_unlock_irqrestore(&dsi->irq_lock, flags);
957 static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
958 omap_dsi_isr_t isr, void *arg, u32 mask)
960 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
964 spin_lock_irqsave(&dsi->irq_lock, flags);
966 r = _dsi_register_isr(isr, arg, mask,
967 dsi->isr_tables.isr_table_vc[channel],
968 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
971 _omap_dsi_set_irqs_vc(dsidev, channel);
973 spin_unlock_irqrestore(&dsi->irq_lock, flags);
978 static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
979 omap_dsi_isr_t isr, void *arg, u32 mask)
981 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
985 spin_lock_irqsave(&dsi->irq_lock, flags);
987 r = _dsi_unregister_isr(isr, arg, mask,
988 dsi->isr_tables.isr_table_vc[channel],
989 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
992 _omap_dsi_set_irqs_vc(dsidev, channel);
994 spin_unlock_irqrestore(&dsi->irq_lock, flags);
999 static int dsi_register_isr_cio(struct platform_device *dsidev,
1000 omap_dsi_isr_t isr, void *arg, u32 mask)
1002 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1003 unsigned long flags;
1006 spin_lock_irqsave(&dsi->irq_lock, flags);
1008 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1009 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1012 _omap_dsi_set_irqs_cio(dsidev);
1014 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1019 static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1020 omap_dsi_isr_t isr, void *arg, u32 mask)
1022 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1023 unsigned long flags;
1026 spin_lock_irqsave(&dsi->irq_lock, flags);
1028 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1029 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1032 _omap_dsi_set_irqs_cio(dsidev);
1034 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1039 static u32 dsi_get_errors(struct platform_device *dsidev)
1041 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1042 unsigned long flags;
1044 spin_lock_irqsave(&dsi->errors_lock, flags);
1047 spin_unlock_irqrestore(&dsi->errors_lock, flags);
1051 /* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
1052 static inline void enable_clocks(bool enable)
1055 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1057 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1060 /* source clock for DSI PLL. this could also be PCLKFREE */
1061 static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1064 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1067 dss_clk_enable(DSS_CLK_SYSCK);
1069 dss_clk_disable(DSS_CLK_SYSCK);
1071 if (enable && dsi->pll_locked) {
1072 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1073 DSSERR("cannot lock PLL when enabling clocks\n");
1078 static void _dsi_print_reset_status(struct platform_device *dsidev)
1086 /* A dummy read using the SCP interface to any DSIPHY register is
1087 * required after DSIPHY reset to complete the reset of the DSI complex
1088 * I/O. */
1089 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1091 printk(KERN_DEBUG "DSI resets: ");
1093 l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
1094 printk("PLL (%d) ", FLD_GET(l, 0, 0));
1096 l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1097 printk("CIO (%d) ", FLD_GET(l, 29, 29));
1099 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
1109 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1110 printk("PHY (%x%x%x, %d, %d, %d)\n",
1116 FLD_GET(l, 31, 31));
1119 #define _dsi_print_reset_status(x)
1122 static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1124 DSSDBG("dsi_if_enable(%d)\n", enable);
1126 enable = enable ? 1 : 0;
1127 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1129 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1130 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1137 unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1139 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1141 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1144 static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1146 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1148 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1151 static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1153 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1155 return dsi->current_cinfo.clkin4ddr / 16;
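/*
 * Clock-tree note: clkin4ddr is four times the DDR clock, and each lane
 * carries two bits per DDR clock cycle, so the per-lane bit rate is
 * clkin4ddr / 2 and the high-speed byte clock is clkin4ddr / 16, which is
 * what this helper returns.
 */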
1158 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1161 int dsi_module = dsi_get_dsidev_id(dsidev);
1163 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1164 /* DSI FCLK source is DSS_CLK_FCK */
1165 r = dss_clk_get_rate(DSS_CLK_FCK);
1167 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1168 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1174 static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1176 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1177 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1178 unsigned long dsi_fclk;
1179 unsigned lp_clk_div;
1180 unsigned long lp_clk;
1182 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1184 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1187 dsi_fclk = dsi_fclk_rate(dsidev);
1189 lp_clk = dsi_fclk / 2 / lp_clk_div;
1191 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1192 dsi->current_cinfo.lp_clk = lp_clk;
1193 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1195 /* LP_CLK_DIVISOR */
1196 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1198 /* LP_RX_SYNCHRO_ENABLE */
1199 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
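/*
 * Worked example with assumed board values: for dsi_fclk = 173 MHz and
 * lp_clk_div = 10, lp_clk = 173000000 / 2 / 10 = 8650000, i.e. 8.65 MHz on
 * the wire in LP mode; since dsi_fclk is above 30 MHz, LP_RX_SYNCHRO_ENABLE
 * is set as well.
 */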
1204 static void dsi_enable_scp_clk(struct platform_device *dsidev)
1206 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1208 if (dsi->scp_clk_refcount++ == 0)
1209 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1212 static void dsi_disable_scp_clk(struct platform_device *dsidev)
1214 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1216 WARN_ON(dsi->scp_clk_refcount == 0);
1217 if (--dsi->scp_clk_refcount == 0)
1218 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1221 enum dsi_pll_power_state {
1222 DSI_PLL_POWER_OFF = 0x0,
1223 DSI_PLL_POWER_ON_HSCLK = 0x1,
1224 DSI_PLL_POWER_ON_ALL = 0x2,
1225 DSI_PLL_POWER_ON_DIV = 0x3,
1228 static int dsi_pll_power(struct platform_device *dsidev,
1229 enum dsi_pll_power_state state)
1233 /* DSI-PLL power command 0x3 is not working */
1234 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1235 state == DSI_PLL_POWER_ON_DIV)
1236 state = DSI_PLL_POWER_ON_ALL;
1239 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1241 /* PLL_PWR_STATUS */
1242 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1244 DSSERR("Failed to set DSI PLL power mode to %d\n",
1254 /* calculate clock rates using dividers in cinfo */
1255 static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1256 struct dsi_clock_info *cinfo)
1258 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1259 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1261 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1264 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1267 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1270 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1273 if (cinfo->use_sys_clk) {
1274 cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
1275 /* XXX it is unclear if highfreq should be used
1276 * with DSS_SYS_CLK source also */
1277 cinfo->highfreq = 0;
1279 cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
1281 if (cinfo->clkin < 32000000)
1282 cinfo->highfreq = 0;
1284 cinfo->highfreq = 1;
1287 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1289 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1292 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
1294 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1297 if (cinfo->regm_dispc > 0)
1298 cinfo->dsi_pll_hsdiv_dispc_clk =
1299 cinfo->clkin4ddr / cinfo->regm_dispc;
1301 cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1303 if (cinfo->regm_dsi > 0)
1304 cinfo->dsi_pll_hsdiv_dsi_clk =
1305 cinfo->clkin4ddr / cinfo->regm_dsi;
1307 cinfo->dsi_pll_hsdiv_dsi_clk = 0;
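/*
 * Worked example of the formulas above, with assumed values (19.2 MHz sys
 * clock, use_sys_clk, highfreq = 0, regn = 10, regm = 250, regm_dispc = 6):
 *
 *	fint        = 19200000 / 10      = 1.92 MHz
 *	clkin4ddr   = 2 * 250 * 1920000  = 960 MHz
 *	hsdiv_dispc = 960000000 / 6      = 160 MHz
 *
 * The regn/regm choice must keep fint inside [fint_min, fint_max] and
 * clkin4ddr at or below 1.8 GHz, which is what the range checks enforce.
 */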
1312 int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1313 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1314 struct dispc_clock_info *dispc_cinfo)
1316 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1317 struct dsi_clock_info cur, best;
1318 struct dispc_clock_info best_dispc;
1319 int min_fck_per_pck;
1321 unsigned long dss_sys_clk, max_dss_fck;
1323 dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
1325 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1327 if (req_pck == dsi->cache_req_pck &&
1328 dsi->cache_cinfo.clkin == dss_sys_clk) {
1329 DSSDBG("DSI clock info found from cache\n");
1330 *dsi_cinfo = dsi->cache_cinfo;
1331 dispc_find_clk_divs(is_tft, req_pck,
1332 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1336 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1338 if (min_fck_per_pck &&
1339 req_pck * min_fck_per_pck > max_dss_fck) {
1340 DSSERR("Requested pixel clock not possible with the current "
1341 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1342 "the constraint off.\n");
1343 min_fck_per_pck = 0;
1346 DSSDBG("dsi_pll_calc\n");
1349 memset(&best, 0, sizeof(best));
1350 memset(&best_dispc, 0, sizeof(best_dispc));
1352 memset(&cur, 0, sizeof(cur));
1353 cur.clkin = dss_sys_clk;
1354 cur.use_sys_clk = 1;
1357 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1358 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1359 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1360 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1361 if (cur.highfreq == 0)
1362 cur.fint = cur.clkin / cur.regn;
1364 cur.fint = cur.clkin / (2 * cur.regn);
1366 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1369 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1370 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1373 a = 2 * cur.regm * (cur.clkin/1000);
1374 b = cur.regn * (cur.highfreq + 1);
1375 cur.clkin4ddr = a / b * 1000;
1377 if (cur.clkin4ddr > 1800 * 1000 * 1000)
1380 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1381 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1382 for (cur.regm_dispc = 1; cur.regm_dispc <
1383 dsi->regm_dispc_max; ++cur.regm_dispc) {
1384 struct dispc_clock_info cur_dispc;
1385 cur.dsi_pll_hsdiv_dispc_clk =
1386 cur.clkin4ddr / cur.regm_dispc;
1388 /* this will narrow down the search a bit,
1389 * but still give pixclocks below what was
1390 * requested */
1391 if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
1394 if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
1397 if (min_fck_per_pck &&
1398 cur.dsi_pll_hsdiv_dispc_clk <
1399 req_pck * min_fck_per_pck)
1404 dispc_find_clk_divs(is_tft, req_pck,
1405 cur.dsi_pll_hsdiv_dispc_clk,
1408 if (abs(cur_dispc.pck - req_pck) <
1409 abs(best_dispc.pck - req_pck)) {
1411 best_dispc = cur_dispc;
1413 if (cur_dispc.pck == req_pck)
1421 if (min_fck_per_pck) {
1422 DSSERR("Could not find suitable clock settings.\n"
1423 "Turning FCK/PCK constraint off and"
1425 min_fck_per_pck = 0;
1429 DSSERR("Could not find suitable clock settings.\n");
1434 /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
1436 best.dsi_pll_hsdiv_dsi_clk = 0;
1441 *dispc_cinfo = best_dispc;
1443 dsi->cache_req_pck = req_pck;
1444 dsi->cache_clk_freq = 0;
1445 dsi->cache_cinfo = best;
1450 int dsi_pll_set_clock_div(struct platform_device *dsidev,
1451 struct dsi_clock_info *cinfo)
1453 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1457 u8 regn_start, regn_end, regm_start, regm_end;
1458 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1462 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1463 dsi->current_cinfo.highfreq = cinfo->highfreq;
1465 dsi->current_cinfo.fint = cinfo->fint;
1466 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1467 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1468 cinfo->dsi_pll_hsdiv_dispc_clk;
1469 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1470 cinfo->dsi_pll_hsdiv_dsi_clk;
1472 dsi->current_cinfo.regn = cinfo->regn;
1473 dsi->current_cinfo.regm = cinfo->regm;
1474 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1475 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1477 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1479 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1480 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
1484 /* DSIPHY == CLKIN4DDR */
1485 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1489 cinfo->highfreq + 1,
1492 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1493 cinfo->clkin4ddr / 1000 / 1000 / 2);
1495 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1497 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1498 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1499 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1500 cinfo->dsi_pll_hsdiv_dispc_clk);
1501 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1502 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1503 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1504 cinfo->dsi_pll_hsdiv_dsi_clk);
1506 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1507 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1508 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
1509 &regm_dispc_end);
1510 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1511 &regm_dsi_end);
1513 /* DSI_PLL_AUTOMODE = manual */
1514 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1516 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1517 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1519 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1521 l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1523 l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1524 regm_dispc_start, regm_dispc_end);
1525 /* DSIPROTO_CLOCK_DIV */
1526 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1527 regm_dsi_start, regm_dsi_end);
1528 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1530 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1532 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1533 f = cinfo->fint < 1000000 ? 0x3 :
1534 cinfo->fint < 1250000 ? 0x4 :
1535 cinfo->fint < 1500000 ? 0x5 :
1536 cinfo->fint < 1750000 ? 0x6 :
1537 0x7;
1540 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1542 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1543 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1544 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1545 11, 11); /* DSI_PLL_CLKSEL */
1546 l = FLD_MOD(l, cinfo->highfreq,
1547 12, 12); /* DSI_PLL_HIGHFREQ */
1548 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1549 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1550 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1551 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1553 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1555 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1556 DSSERR("dsi pll go bit not going down.\n");
1561 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1562 DSSERR("cannot lock PLL\n");
1567 dsi->pll_locked = 1;
1569 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1570 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1571 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1572 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1573 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1574 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1575 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1576 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1577 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1578 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1579 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1580 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1581 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1582 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1583 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1584 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1586 DSSDBG("PLL config done\n");
1591 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1594 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1596 enum dsi_pll_power_state pwstate;
1598 DSSDBG("PLL init\n");
1600 if (dsi->vdds_dsi_reg == NULL) {
1601 struct regulator *vdds_dsi;
1603 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1605 if (IS_ERR(vdds_dsi)) {
1606 DSSERR("can't get VDDS_DSI regulator\n");
1607 return PTR_ERR(vdds_dsi);
1610 dsi->vdds_dsi_reg = vdds_dsi;
1614 dsi_enable_pll_clock(dsidev, 1);
1616 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1618 dsi_enable_scp_clk(dsidev);
1620 if (!dsi->vdds_dsi_enabled) {
1621 r = regulator_enable(dsi->vdds_dsi_reg);
1624 dsi->vdds_dsi_enabled = true;
1627 /* XXX PLL does not come out of reset without this... */
1628 dispc_pck_free_enable(1);
1630 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1631 DSSERR("PLL not coming out of reset.\n");
1633 dispc_pck_free_enable(0);
1637 /* XXX ... but if left on, we get problems when planes do not
1638 * fill the whole display. No idea about this */
1639 dispc_pck_free_enable(0);
1641 if (enable_hsclk && enable_hsdiv)
1642 pwstate = DSI_PLL_POWER_ON_ALL;
1643 else if (enable_hsclk)
1644 pwstate = DSI_PLL_POWER_ON_HSCLK;
1645 else if (enable_hsdiv)
1646 pwstate = DSI_PLL_POWER_ON_DIV;
1648 pwstate = DSI_PLL_POWER_OFF;
1650 r = dsi_pll_power(dsidev, pwstate);
1655 DSSDBG("PLL init done\n");
1659 if (dsi->vdds_dsi_enabled) {
1660 regulator_disable(dsi->vdds_dsi_reg);
1661 dsi->vdds_dsi_enabled = false;
1664 dsi_disable_scp_clk(dsidev);
1666 dsi_enable_pll_clock(dsidev, 0);
1670 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1672 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1674 dsi->pll_locked = 0;
1675 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1676 if (disconnect_lanes) {
1677 WARN_ON(!dsi->vdds_dsi_enabled);
1678 regulator_disable(dsi->vdds_dsi_reg);
1679 dsi->vdds_dsi_enabled = false;
1682 dsi_disable_scp_clk(dsidev);
1684 dsi_enable_pll_clock(dsidev, 0);
1686 DSSDBG("PLL uninit done\n");
1689 static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1692 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1693 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1694 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1695 int dsi_module = dsi_get_dsidev_id(dsidev);
1697 dispc_clk_src = dss_get_dispc_clk_source();
1698 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1702 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1704 seq_printf(s, "dsi pll source = %s\n",
1705 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1707 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1709 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1710 cinfo->clkin4ddr, cinfo->regm);
1712 seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
1713 dss_get_generic_clk_source_name(dispc_clk_src),
1714 dss_feat_get_clk_source_name(dispc_clk_src),
1715 cinfo->dsi_pll_hsdiv_dispc_clk,
1717 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1720 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
1721 dss_get_generic_clk_source_name(dsi_clk_src),
1722 dss_feat_get_clk_source_name(dsi_clk_src),
1723 cinfo->dsi_pll_hsdiv_dsi_clk,
1725 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1728 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1730 seq_printf(s, "dsi fclk source = %s (%s)\n",
1731 dss_get_generic_clk_source_name(dsi_clk_src),
1732 dss_feat_get_clk_source_name(dsi_clk_src));
1734 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1736 seq_printf(s, "DDR_CLK\t\t%lu\n",
1737 cinfo->clkin4ddr / 4);
1739 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1741 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1746 void dsi_dump_clocks(struct seq_file *s)
1748 struct platform_device *dsidev;
1751 for (i = 0; i < MAX_NUM_DSI; i++) {
1752 dsidev = dsi_get_dsidev_from_id(i);
1754 dsi_dump_dsidev_clocks(dsidev, s);
1758 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1759 static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1762 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1763 unsigned long flags;
1764 struct dsi_irq_stats stats;
1765 int dsi_module = dsi_get_dsidev_id(dsidev);
1767 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1769 stats = dsi->irq_stats;
1770 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1771 dsi->irq_stats.last_reset = jiffies;
1773 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1775 seq_printf(s, "period %u ms\n",
1776 jiffies_to_msecs(jiffies - stats.last_reset));
1778 seq_printf(s, "irqs %d\n", stats.irq_count);
1780 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1782 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
1798 PIS(LDO_POWER_GOOD);
1803 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1804 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1805 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1806 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1807 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1809 seq_printf(s, "-- VC interrupts --\n");
1818 PIS(PP_BUSY_CHANGE);
1822 seq_printf(s, "%-20s %10d\n", #x, \
1823 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1825 seq_printf(s, "-- CIO interrupts --\n");
1838 PIS(ERRCONTENTIONLP0_1);
1839 PIS(ERRCONTENTIONLP1_1);
1840 PIS(ERRCONTENTIONLP0_2);
1841 PIS(ERRCONTENTIONLP1_2);
1842 PIS(ERRCONTENTIONLP0_3);
1843 PIS(ERRCONTENTIONLP1_3);
1844 PIS(ULPSACTIVENOT_ALL0);
1845 PIS(ULPSACTIVENOT_ALL1);
1849 static void dsi1_dump_irqs(struct seq_file *s)
1851 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1853 dsi_dump_dsidev_irqs(dsidev, s);
1856 static void dsi2_dump_irqs(struct seq_file *s)
1858 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1860 dsi_dump_dsidev_irqs(dsidev, s);
1863 void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1864 const struct file_operations *debug_fops)
1866 struct platform_device *dsidev;
1868 dsidev = dsi_get_dsidev_from_id(0);
1870 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1871 &dsi1_dump_irqs, debug_fops);
1873 dsidev = dsi_get_dsidev_from_id(1);
1875 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1876 &dsi2_dump_irqs, debug_fops);
1880 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1883 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1885 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1886 dsi_enable_scp_clk(dsidev);
1888 DUMPREG(DSI_REVISION);
1889 DUMPREG(DSI_SYSCONFIG);
1890 DUMPREG(DSI_SYSSTATUS);
1891 DUMPREG(DSI_IRQSTATUS);
1892 DUMPREG(DSI_IRQENABLE);
1894 DUMPREG(DSI_COMPLEXIO_CFG1);
1895 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1896 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1897 DUMPREG(DSI_CLK_CTRL);
1898 DUMPREG(DSI_TIMING1);
1899 DUMPREG(DSI_TIMING2);
1900 DUMPREG(DSI_VM_TIMING1);
1901 DUMPREG(DSI_VM_TIMING2);
1902 DUMPREG(DSI_VM_TIMING3);
1903 DUMPREG(DSI_CLK_TIMING);
1904 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1905 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1906 DUMPREG(DSI_COMPLEXIO_CFG2);
1907 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1908 DUMPREG(DSI_VM_TIMING4);
1909 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1910 DUMPREG(DSI_VM_TIMING5);
1911 DUMPREG(DSI_VM_TIMING6);
1912 DUMPREG(DSI_VM_TIMING7);
1913 DUMPREG(DSI_STOPCLK_TIMING);
1915 DUMPREG(DSI_VC_CTRL(0));
1916 DUMPREG(DSI_VC_TE(0));
1917 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1918 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1919 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1920 DUMPREG(DSI_VC_IRQSTATUS(0));
1921 DUMPREG(DSI_VC_IRQENABLE(0));
1923 DUMPREG(DSI_VC_CTRL(1));
1924 DUMPREG(DSI_VC_TE(1));
1925 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1926 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1927 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1928 DUMPREG(DSI_VC_IRQSTATUS(1));
1929 DUMPREG(DSI_VC_IRQENABLE(1));
1931 DUMPREG(DSI_VC_CTRL(2));
1932 DUMPREG(DSI_VC_TE(2));
1933 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1934 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1935 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1936 DUMPREG(DSI_VC_IRQSTATUS(2));
1937 DUMPREG(DSI_VC_IRQENABLE(2));
1939 DUMPREG(DSI_VC_CTRL(3));
1940 DUMPREG(DSI_VC_TE(3));
1941 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1942 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1943 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1944 DUMPREG(DSI_VC_IRQSTATUS(3));
1945 DUMPREG(DSI_VC_IRQENABLE(3));
1947 DUMPREG(DSI_DSIPHY_CFG0);
1948 DUMPREG(DSI_DSIPHY_CFG1);
1949 DUMPREG(DSI_DSIPHY_CFG2);
1950 DUMPREG(DSI_DSIPHY_CFG5);
1952 DUMPREG(DSI_PLL_CONTROL);
1953 DUMPREG(DSI_PLL_STATUS);
1954 DUMPREG(DSI_PLL_GO);
1955 DUMPREG(DSI_PLL_CONFIGURATION1);
1956 DUMPREG(DSI_PLL_CONFIGURATION2);
1958 dsi_disable_scp_clk(dsidev);
1959 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1963 static void dsi1_dump_regs(struct seq_file *s)
1965 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1967 dsi_dump_dsidev_regs(dsidev, s);
1970 static void dsi2_dump_regs(struct seq_file *s)
1972 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1974 dsi_dump_dsidev_regs(dsidev, s);
1977 void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1978 const struct file_operations *debug_fops)
1980 struct platform_device *dsidev;
1982 dsidev = dsi_get_dsidev_from_id(0);
1984 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1985 &dsi1_dump_regs, debug_fops);
1987 dsidev = dsi_get_dsidev_from_id(1);
1989 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
1990 &dsi2_dump_regs, debug_fops);
1992 enum dsi_cio_power_state {
1993 DSI_COMPLEXIO_POWER_OFF = 0x0,
1994 DSI_COMPLEXIO_POWER_ON = 0x1,
1995 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1998 static int dsi_cio_power(struct platform_device *dsidev,
1999 enum dsi_cio_power_state state)
2004 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
2007 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
2010 DSSERR("failed to set complexio power state to "
2020 /* Number of data lanes present on DSI interface */
2021 static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
2023 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
2024 * of data lanes as 2 by default */
2025 if (dss_has_feature(FEAT_DSI_GNQ))
2026 return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
2031 /* Number of data lanes used by the dss device */
2032 static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
2034 int num_data_lanes = 0;
2036 if (dssdev->phy.dsi.data1_lane != 0)
2038 if (dssdev->phy.dsi.data2_lane != 0)
2040 if (dssdev->phy.dsi.data3_lane != 0)
2042 if (dssdev->phy.dsi.data4_lane != 0)
2045 return num_data_lanes;
2048 static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2052 /* line buffer on OMAP3 is 1024 x 24bits */
2053 /* XXX: for some reason using full buffer size causes
2054 * considerable TX slowdown with update sizes that fill the
2055 * line buffer */
2056 if (!dss_has_feature(FEAT_DSI_GNQ))
2059 val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2063 return 512 * 3; /* 512x24 bits */
2065 return 682 * 3; /* 682x24 bits */
2067 return 853 * 3; /* 853x24 bits */
2069 return 1024 * 3; /* 1024x24 bits */
2071 return 1194 * 3; /* 1194x24 bits */
2073 return 1365 * 3; /* 1365x24 bits */
2079 static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2081 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2083 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2085 int clk_lane = dssdev->phy.dsi.clk_lane;
2086 int data1_lane = dssdev->phy.dsi.data1_lane;
2087 int data2_lane = dssdev->phy.dsi.data2_lane;
2088 int clk_pol = dssdev->phy.dsi.clk_pol;
2089 int data1_pol = dssdev->phy.dsi.data1_pol;
2090 int data2_pol = dssdev->phy.dsi.data2_pol;
2092 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2093 r = FLD_MOD(r, clk_lane, 2, 0);
2094 r = FLD_MOD(r, clk_pol, 3, 3);
2095 r = FLD_MOD(r, data1_lane, 6, 4);
2096 r = FLD_MOD(r, data1_pol, 7, 7);
2097 r = FLD_MOD(r, data2_lane, 10, 8);
2098 r = FLD_MOD(r, data2_pol, 11, 11);
2099 if (num_data_lanes_dssdev > 2) {
2100 int data3_lane = dssdev->phy.dsi.data3_lane;
2101 int data3_pol = dssdev->phy.dsi.data3_pol;
2103 r = FLD_MOD(r, data3_lane, 14, 12);
2104 r = FLD_MOD(r, data3_pol, 15, 15);
2106 if (num_data_lanes_dssdev > 3) {
2107 int data4_lane = dssdev->phy.dsi.data4_lane;
2108 int data4_pol = dssdev->phy.dsi.data4_pol;
2110 r = FLD_MOD(r, data4_lane, 18, 16);
2111 r = FLD_MOD(r, data4_pol, 19, 19);
2113 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
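/*
 * Packing example with an assumed (common) mapping: clk on lane 1, data1 on
 * lane 2, data2 on lane 3, all normal polarity. Each lane takes a 3-bit
 * position field plus a 1-bit polarity field, so the low 12 bits of
 * DSI_COMPLEXIO_CFG1 become 0x321.
 */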
2115 /* The configuration of the DSI complex I/O (number of data lanes,
2116 position, differential order) should not be changed while
2117 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
2118 the hardware to take into account a new configuration of the complex
2119 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
2120 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
2121 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
2122 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
2123 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
2124 DSI complex I/O configuration is unknown. */
2127 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2128 REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
2129 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
2130 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2134 static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2136 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2138 /* convert time in ns to ddr ticks, rounding up */
2139 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2140 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2143 static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2145 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2147 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2148 return ddr * 1000 * 1000 / (ddr_clk / 1000);
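/*
 * Worked example with an assumed clkin4ddr of 960 MHz: the DDR clock is
 * 240 MHz, so ns2ddr(dsidev, 70) = (70 * 240 + 999) / 1000 = 17 ticks, and
 * ddr2ns(dsidev, 17) = 17 * 1000 * 1000 / 240000 = 70 ns (rounded down).
 */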
2151 static void dsi_cio_timings(struct platform_device *dsidev)
2154 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
2155 u32 tlpx_half, tclk_trail, tclk_zero;
2158 /* calculate timings */
2160 /* 1 * DDR_CLK = 2 * UI */
2162 /* min 40ns + 4*UI max 85ns + 6*UI */
2163 ths_prepare = ns2ddr(dsidev, 70) + 2;
2165 /* min 145ns + 10*UI */
2166 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
2168 /* min max(8*UI, 60ns+4*UI) */
2169 ths_trail = ns2ddr(dsidev, 60) + 5;
2172 ths_exit = ns2ddr(dsidev, 145);
2175 tlpx_half = ns2ddr(dsidev, 25);
2178 tclk_trail = ns2ddr(dsidev, 60) + 2;
2180 /* min 38ns, max 95ns */
2181 tclk_prepare = ns2ddr(dsidev, 65);
2183 /* min tclk-prepare + tclk-zero = 300ns */
2184 tclk_zero = ns2ddr(dsidev, 260);
2186 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
2187 ths_prepare, ddr2ns(dsidev, ths_prepare),
2188 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
2189 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
2190 ths_trail, ddr2ns(dsidev, ths_trail),
2191 ths_exit, ddr2ns(dsidev, ths_exit));
2193 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
2194 "tclk_zero %u (%uns)\n",
2195 tlpx_half, ddr2ns(dsidev, tlpx_half),
2196 tclk_trail, ddr2ns(dsidev, tclk_trail),
2197 tclk_zero, ddr2ns(dsidev, tclk_zero));
2198 DSSDBG("tclk_prepare %u (%uns)\n",
2199 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
2201 /* program timings */
2203 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2204 r = FLD_MOD(r, ths_prepare, 31, 24);
2205 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
2206 r = FLD_MOD(r, ths_trail, 15, 8);
2207 r = FLD_MOD(r, ths_exit, 7, 0);
2208 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2210 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2211 r = FLD_MOD(r, tlpx_half, 22, 16);
2212 r = FLD_MOD(r, tclk_trail, 15, 8);
2213 r = FLD_MOD(r, tclk_zero, 7, 0);
2214 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2216 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2217 r = FLD_MOD(r, tclk_prepare, 7, 0);
2218 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
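/*
 * Continuing the assumed 240 MHz DDR clock example: ths_prepare becomes
 * ns2ddr(dsidev, 70) + 2 = 19 ticks, i.e. about 79 ns, and one UI is about
 * 2.08 ns, so the value sits inside the 40ns+4*UI .. 85ns+6*UI window quoted
 * above.
 */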
2221 static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
2222 enum dsi_lane lanes)
2224 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2225 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2226 int clk_lane = dssdev->phy.dsi.clk_lane;
2227 int data1_lane = dssdev->phy.dsi.data1_lane;
2228 int data2_lane = dssdev->phy.dsi.data2_lane;
2229 int data3_lane = dssdev->phy.dsi.data3_lane;
2230 int data4_lane = dssdev->phy.dsi.data4_lane;
2231 int clk_pol = dssdev->phy.dsi.clk_pol;
2232 int data1_pol = dssdev->phy.dsi.data1_pol;
2233 int data2_pol = dssdev->phy.dsi.data2_pol;
2234 int data3_pol = dssdev->phy.dsi.data3_pol;
2235 int data4_pol = dssdev->phy.dsi.data4_pol;
2238 u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
2240 if (lanes & DSI_CLK_P)
2241 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
2242 if (lanes & DSI_CLK_N)
2243 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
2245 if (lanes & DSI_DATA1_P)
2246 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
2247 if (lanes & DSI_DATA1_N)
2248 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
2250 if (lanes & DSI_DATA2_P)
2251 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
2252 if (lanes & DSI_DATA2_N)
2253 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
2255 if (lanes & DSI_DATA3_P)
2256 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
2257 if (lanes & DSI_DATA3_N)
2258 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
2260 if (lanes & DSI_DATA4_P)
2261 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
2262 if (lanes & DSI_DATA4_N)
2263 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
2265 * Bits in REGLPTXSCPDAT4TO0DXDY: one override bit pair per lane, starting at bit 17 */
2273 /* Set the lane override configuration */
2275 /* REGLPTXSCPDAT4TO0DXDY */
2276 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
2278 /* Enable lane override */
2281 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
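/*
 * Worked example with the same assumed mapping as above (clk on lane 1,
 * data1 on lane 2, data2 on lane 3, normal polarity): overriding
 * DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P sets l = (1 << 1) | (1 << 3) |
 * (1 << 5) = 0x2a, and the REG_FLD_MOD above places those bits at register
 * bits 18, 20 and 22, since the override field starts at bit 17.
 */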
2284 static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2286 /* Disable lane override */
2287 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2288 /* Reset the lane override configuration */
2289 /* REGLPTXSCPDAT4TO0DXDY */
2290 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2293 static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2295 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2300 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2314 if (dssdev->phy.dsi.clk_lane != 0)
2315 in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2316 if (dssdev->phy.dsi.data1_lane != 0)
2317 in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2318 if (dssdev->phy.dsi.data2_lane != 0)
2319 in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2327 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2330 for (i = 0; i < 3; ++i) {
2331 if (!in_use[i] || (l & (1 << bits[i])))
2339 for (i = 0; i < 3; ++i) {
2340 if (!in_use[i] || (l & (1 << bits[i])))
2343 DSSERR("CIO TXCLKESC%d domain not coming " \
2344 "out of reset\n", i);
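/*
 * Complex I/O (CIO) bring-up: enable the SCP clock, perform the dummy
 * DSIPHY read that completes the PHY reset, program the lane
 * configuration, max out the TX STOP MODE timer, optionally run the
 * manual ULPS exit through the lane override, power the CIO on, wait for
 * the power and TXCLKESC reset domains, and finally program the DSIPHY
 * timings.
 */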
2353 static int dsi_cio_init(struct omap_dss_device *dssdev)
2355 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2356 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2358 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2363 if (dsi->dsi_mux_pads)
2364 dsi->dsi_mux_pads(true);
2366 dsi_enable_scp_clk(dsidev);
2368 /* A dummy read using the SCP interface to any DSIPHY register is
2369	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
2371 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2373 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2374 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2376 goto err_scp_clk_dom;
2379 dsi_set_lane_config(dssdev);
2381 /* set TX STOP MODE timer to maximum for this operation */
2382 l = dsi_read_reg(dsidev, DSI_TIMING1);
2383 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2384 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2385 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2386 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2387 dsi_write_reg(dsidev, DSI_TIMING1, l);
2389 if (dsi->ulps_enabled) {
2390 u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
2392 DSSDBG("manual ulps exit\n");
2394 /* ULPS is exited by Mark-1 state for 1ms, followed by
2395 * stop state. DSS HW cannot do this via the normal
2396 * ULPS exit sequence, as after reset the DSS HW thinks
2397 * that we are not in ULPS mode, and refuses to send the
2398		 * sequence. So we need to send the ULPS exit sequence
		 * manually.
		 */
2402 if (num_data_lanes_dssdev > 2)
2403 lane_mask |= DSI_DATA3_P;
2405 if (num_data_lanes_dssdev > 3)
2406 lane_mask |= DSI_DATA4_P;
2408 dsi_cio_enable_lane_override(dssdev, lane_mask);
2411 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2415 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2416 DSSERR("CIO PWR clock domain not coming out of reset.\n");
2418 goto err_cio_pwr_dom;
2421 dsi_if_enable(dsidev, true);
2422 dsi_if_enable(dsidev, false);
2423 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2425 r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
2427 goto err_tx_clk_esc_rst;
2429 if (dsi->ulps_enabled) {
2430 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2431 ktime_t wait = ns_to_ktime(1000 * 1000);
2432 set_current_state(TASK_UNINTERRUPTIBLE);
2433 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2435 /* Disable the override. The lanes should be set to Mark-11
2436 * state by the HW */
2437 dsi_cio_disable_lane_override(dsidev);
2440 /* FORCE_TX_STOP_MODE_IO */
2441 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2443 dsi_cio_timings(dsidev);
2445 dsi->ulps_enabled = false;
2447 DSSDBG("CIO init done\n");
2452 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2454 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2456 if (dsi->ulps_enabled)
2457 dsi_cio_disable_lane_override(dsidev);
2459 dsi_disable_scp_clk(dsidev);
2460 if (dsi->dsi_mux_pads)
2461 dsi->dsi_mux_pads(false);
2465 static void dsi_cio_uninit(struct platform_device *dsidev)
2467 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2469 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2470 dsi_disable_scp_clk(dsidev);
2471 if (dsi->dsi_mux_pads)
2472 dsi->dsi_mux_pads(false);
2475 static int _dsi_wait_reset(struct platform_device *dsidev)
2479 while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
2481 DSSERR("soft reset failed\n");
2490 static int _dsi_reset(struct platform_device *dsidev)
2493 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
2494 return _dsi_wait_reset(dsidev);
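/*
 * The TX and RX FIFOs are shared between the four virtual channels: each
 * VC is given a start address ("add") and a size, packed per-VC into
 * DSI_TX_FIFO_VC_SIZE / DSI_RX_FIFO_VC_SIZE, and the sizes together must
 * not exceed the four available units (hence the "Illegal FIFO
 * configuration" check). dsi_vc_send_long() below treats one size unit
 * as 32 words of 32 bits when checking the packet length.
 */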
2497 static void dsi_config_tx_fifo(struct platform_device *dsidev,
2498 enum fifo_size size1, enum fifo_size size2,
2499 enum fifo_size size3, enum fifo_size size4)
2501 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2506 dsi->vc[0].fifo_size = size1;
2507 dsi->vc[1].fifo_size = size2;
2508 dsi->vc[2].fifo_size = size3;
2509 dsi->vc[3].fifo_size = size4;
2511 for (i = 0; i < 4; i++) {
2513 int size = dsi->vc[i].fifo_size;
2515 if (add + size > 4) {
2516 DSSERR("Illegal FIFO configuration\n");
2520		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
2522		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
2526 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2529 static void dsi_config_rx_fifo(struct platform_device *dsidev,
2530 enum fifo_size size1, enum fifo_size size2,
2531 enum fifo_size size3, enum fifo_size size4)
2533 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2538 dsi->vc[0].fifo_size = size1;
2539 dsi->vc[1].fifo_size = size2;
2540 dsi->vc[2].fifo_size = size3;
2541 dsi->vc[3].fifo_size = size4;
2543 for (i = 0; i < 4; i++) {
2545 int size = dsi->vc[i].fifo_size;
2547 if (add + size > 4) {
2548 DSSERR("Illegal FIFO configuration\n");
2552		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
2554		/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
2558 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2561 static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2565 r = dsi_read_reg(dsidev, DSI_TIMING1);
2566 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2567 dsi_write_reg(dsidev, DSI_TIMING1, r);
2569 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2570 DSSERR("TX_STOP bit not going down\n");
2577 static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2579 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
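/*
 * Syncing with a transfer that may still be in flight on a VC is done by
 * registering a PACKET_SENT ISR and sleeping on a completion: the
 * video-port variant watches TE_EN/TE_START in DSI_VC_TE, the L4 variant
 * watches TX_FIFO_NOT_EMPTY in DSI_VC_CTRL, and both give up after a
 * 10 ms timeout.
 */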
2582 static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2584 struct dsi_packet_sent_handler_data *vp_data =
2585 (struct dsi_packet_sent_handler_data *) data;
2586 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2587 const int channel = dsi->update_channel;
2588 u8 bit = dsi->te_enabled ? 30 : 31;
2590 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2591 complete(vp_data->completion);
2594 static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2596 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2597 DECLARE_COMPLETION_ONSTACK(completion);
2598 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2602 bit = dsi->te_enabled ? 30 : 31;
2604 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2605 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2609 /* Wait for completion only if TE_EN/TE_START is still set */
2610 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2611 if (wait_for_completion_timeout(&completion,
2612 msecs_to_jiffies(10)) == 0) {
2613 DSSERR("Failed to complete previous frame transfer\n");
2619 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2620 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2624 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2625 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2630 static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2632 struct dsi_packet_sent_handler_data *l4_data =
2633 (struct dsi_packet_sent_handler_data *) data;
2634 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2635 const int channel = dsi->update_channel;
2637 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2638 complete(l4_data->completion);
2641 static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2643 DECLARE_COMPLETION_ONSTACK(completion);
2644 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2647 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2648 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2652 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2653 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2654 if (wait_for_completion_timeout(&completion,
2655 msecs_to_jiffies(10)) == 0) {
2656 DSSERR("Failed to complete previous l4 transfer\n");
2662 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2663 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2667 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2668 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2673 static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2675 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2677 WARN_ON(!dsi_bus_is_locked(dsidev));
2679 WARN_ON(in_interrupt());
2681 if (!dsi_vc_is_enabled(dsidev, channel))
2684 switch (dsi->vc[channel].mode) {
2685 case DSI_VC_MODE_VP:
2686 return dsi_sync_vc_vp(dsidev, channel);
2687 case DSI_VC_MODE_L4:
2688 return dsi_sync_vc_l4(dsidev, channel);
2694 static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2697	DSSDBG("dsi_vc_enable channel %d, enable %d\n",
			channel, enable);
2700 enable = enable ? 1 : 0;
2702 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2704 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2705 0, enable) != enable) {
2706 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2713 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2717 DSSDBGF("%d", channel);
2719 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2721 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2722		DSSERR("VC(%d) busy when trying to configure it!\n",
				channel);
2725 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2726 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
2727 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2728 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2729 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2730 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2731 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2732 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2733 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2735 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2736 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2738 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2741 static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2743 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2745 if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
2748 DSSDBGF("%d", channel);
2750 dsi_sync_vc(dsidev, channel);
2752 dsi_vc_enable(dsidev, channel, 0);
2755 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2756 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
2760 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
2762 /* DCS_CMD_ENABLE */
2763 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2764 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);
2766 dsi_vc_enable(dsidev, channel, 1);
2768 dsi->vc[channel].mode = DSI_VC_MODE_L4;
2773 static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2775 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2777 if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
2780 DSSDBGF("%d", channel);
2782 dsi_sync_vc(dsidev, channel);
2784 dsi_vc_enable(dsidev, channel, 0);
2787 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2788 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2792 /* SOURCE, 1 = video port */
2793 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);
2795 /* DCS_CMD_ENABLE */
2796 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2797 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);
2799 dsi_vc_enable(dsidev, channel, 1);
2801 dsi->vc[channel].mode = DSI_VC_MODE_VP;
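/*
 * To switch a virtual channel between LP and HS transmission the VC and
 * the DSI interface are disabled, MODE_SPEED (DSI_VC_CTRL bit 9) is
 * updated, both are re-enabled, and TX STOP mode is forced afterwards.
 */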
2807 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2810 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2812 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2814 WARN_ON(!dsi_bus_is_locked(dsidev));
2816 dsi_vc_enable(dsidev, channel, 0);
2817 dsi_if_enable(dsidev, 0);
2819 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2821 dsi_vc_enable(dsidev, channel, 1);
2822 dsi_if_enable(dsidev, 1);
2824 dsi_force_tx_stop_mode_io(dsidev);
2826 EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
2828 static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2830 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2832 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2833		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
				val & 0xff,
				(val >> 8) & 0xff,
				(val >> 16) & 0xff,
2837				(val >> 24) & 0xff);
2841 static void dsi_show_rx_ack_with_err(u16 err)
2843 DSSERR("\tACK with ERROR (%#x):\n", err);
2845 DSSERR("\t\tSoT Error\n");
2847 DSSERR("\t\tSoT Sync Error\n");
2849 DSSERR("\t\tEoT Sync Error\n");
2851 DSSERR("\t\tEscape Mode Entry Command Error\n");
2853 DSSERR("\t\tLP Transmit Sync Error\n");
2855 DSSERR("\t\tHS Receive Timeout Error\n");
2857 DSSERR("\t\tFalse Control Error\n");
2859 DSSERR("\t\t(reserved7)\n");
2861 DSSERR("\t\tECC Error, single-bit (corrected)\n");
2863 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2864 if (err & (1 << 10))
2865 DSSERR("\t\tChecksum Error\n");
2866 if (err & (1 << 11))
2867 DSSERR("\t\tData type not recognized\n");
2868 if (err & (1 << 12))
2869 DSSERR("\t\tInvalid VC ID\n");
2870 if (err & (1 << 13))
2871 DSSERR("\t\tInvalid Transmission Length\n");
2872 if (err & (1 << 14))
2873 DSSERR("\t\t(reserved14)\n");
2874 if (err & (1 << 15))
2875 DSSERR("\t\tDSI Protocol Violation\n");
2878 static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2881 /* RX_FIFO_NOT_EMPTY */
2882 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2885 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2886 DSSERR("\trawval %#08x\n", val);
2887 dt = FLD_GET(val, 5, 0);
2888 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2889 u16 err = FLD_GET(val, 23, 8);
2890 dsi_show_rx_ack_with_err(err);
2891 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2892 DSSERR("\tDCS short response, 1 byte: %#x\n",
2893 FLD_GET(val, 23, 8));
2894 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2895 DSSERR("\tDCS short response, 2 byte: %#x\n",
2896 FLD_GET(val, 23, 8));
2897 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2898 DSSERR("\tDCS long response, len %d\n",
2899 FLD_GET(val, 23, 8));
2900 dsi_vc_flush_long_data(dsidev, channel);
2902 DSSERR("\tunknown datatype 0x%02x\n", dt);
2908 static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2910 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2912 if (dsi->debug_write || dsi->debug_read)
2913 DSSDBG("dsi_vc_send_bta %d\n", channel);
2915 WARN_ON(!dsi_bus_is_locked(dsidev));
2917 /* RX_FIFO_NOT_EMPTY */
2918 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2919 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2920 dsi_vc_flush_receive_data(dsidev, channel);
2923 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2928 int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2930 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2931 DECLARE_COMPLETION_ONSTACK(completion);
2935 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2936 &completion, DSI_VC_IRQ_BTA);
2940 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2941 DSI_IRQ_ERROR_MASK);
2945 r = dsi_vc_send_bta(dsidev, channel);
2949 if (wait_for_completion_timeout(&completion,
2950 msecs_to_jiffies(500)) == 0) {
2951 DSSERR("Failed to receive BTA\n");
2956 err = dsi_get_errors(dsidev);
2958 DSSERR("Error while sending BTA: %x\n", err);
2963 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2964 DSI_IRQ_ERROR_MASK);
2966 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2967 &completion, DSI_VC_IRQ_BTA);
2971 EXPORT_SYMBOL(dsi_vc_send_bta_sync);
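/*
 * Long packet transmission: the 4-byte header written to
 * DSI_VC_LONG_PACKET_HEADER carries the data id (data type | vc_id << 6),
 * a 16-bit payload length and an ECC byte; the payload then goes out
 * through DSI_VC_LONG_PACKET_PAYLOAD one little-endian 32-bit word at a
 * time, with a partially filled last word when the length is not a
 * multiple of four.
 */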
2973 static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2974 int channel, u8 data_type, u16 len, u8 ecc)
2976 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2980 WARN_ON(!dsi_bus_is_locked(dsidev));
2982 data_id = data_type | dsi->vc[channel].vc_id << 6;
2984 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2985 FLD_VAL(ecc, 31, 24);
2987 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2990 static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2991 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2995 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
2997 /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2998 b1, b2, b3, b4, val); */
3000 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
3003 static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
3004 u8 data_type, u8 *data, u16 len, u8 ecc)
3007 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3013 if (dsi->debug_write)
3014 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
3017 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
3018 DSSERR("unable to send long packet: packet too long.\n");
3022 dsi_vc_config_l4(dsidev, channel);
3024 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
3027 for (i = 0; i < len >> 2; i++) {
3028 if (dsi->debug_write)
3029 DSSDBG("\tsending full packet %d\n", i);
3036 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
3041 b1 = 0; b2 = 0; b3 = 0;
3043 if (dsi->debug_write)
3044 DSSDBG("\tsending remainder bytes %d\n", i);
3061 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
3067 static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3068 u8 data_type, u16 data, u8 ecc)
3070 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3074 WARN_ON(!dsi_bus_is_locked(dsidev));
3076 if (dsi->debug_write)
3077 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
				channel,
3079				data_type, data & 0xff, (data >> 8) & 0xff);
3081 dsi_vc_config_l4(dsidev, channel);
3083 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
3084 DSSERR("ERROR FIFO FULL, aborting transfer\n");
3088 data_id = data_type | dsi->vc[channel].vc_id << 6;
3090 r = (data_id << 0) | (data << 8) | (ecc << 24);
3092 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
3097 int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
3099 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3100 u8 nullpkg[] = {0, 0, 0, 0};
3102	return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
			4, 0);
3105 EXPORT_SYMBOL(dsi_vc_send_null);
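/*
 * DCS writes choose the packet type from the payload length: one byte is
 * sent as a DCS short write without parameter, two bytes as a short write
 * with one parameter, and anything longer as a DCS long write (0x39).
 */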
3107 int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3110 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3116		r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
				data[0], 0);
3118 } else if (len == 2) {
3119 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
3120 data[0] | (data[1] << 8), 0);
3122 /* 0x39 = DCS Long Write */
3123		r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
				data, len, 0);
3129 EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
3131 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3134 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3137 r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
3141 r = dsi_vc_send_bta_sync(dssdev, channel);
3145 /* RX_FIFO_NOT_EMPTY */
3146 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
3147 DSSERR("rx fifo not empty after write, dumping data:\n");
3148 dsi_vc_flush_receive_data(dsidev, channel);
3155 DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
3156 channel, data[0], len);
3159 EXPORT_SYMBOL(dsi_vc_dcs_write);
3161 int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
3163 return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
3165 EXPORT_SYMBOL(dsi_vc_dcs_write_0);
3167 int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3173 return dsi_vc_dcs_write(dssdev, channel, buf, 2);
3175 EXPORT_SYMBOL(dsi_vc_dcs_write_1);
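/*
 * Illustrative use from a panel driver (a sketch only: the channel is
 * assumed to come from omap_dsi_request_vc(), and 0x11/0x29 are the
 * standard MIPI DCS exit_sleep_mode/set_display_on opcodes, not constants
 * defined in this file):
 *
 *	r = dsi_vc_dcs_write_0(dssdev, channel, 0x11);
 *	if (!r) {
 *		msleep(120);
 *		r = dsi_vc_dcs_write_0(dssdev, channel, 0x29);
 *	}
 */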
3177 int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3178 u8 *buf, int buflen)
3180 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3181 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3186 if (dsi->debug_read)
3187 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
3189 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
3193 r = dsi_vc_send_bta_sync(dssdev, channel);
3197 /* RX_FIFO_NOT_EMPTY */
3198 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
3199 DSSERR("RX fifo empty when trying to read.\n");
3204 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3205 if (dsi->debug_read)
3206 DSSDBG("\theader: %08x\n", val);
3207 dt = FLD_GET(val, 5, 0);
3208 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
3209 u16 err = FLD_GET(val, 23, 8);
3210 dsi_show_rx_ack_with_err(err);
3214 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
3215 u8 data = FLD_GET(val, 15, 8);
3216 if (dsi->debug_read)
3217 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
3227 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
3228 u16 data = FLD_GET(val, 23, 8);
3229 if (dsi->debug_read)
3230 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
3237 buf[0] = data & 0xff;
3238 buf[1] = (data >> 8) & 0xff;
3241 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
3243 int len = FLD_GET(val, 23, 8);
3244 if (dsi->debug_read)
3245 DSSDBG("\tDCS long response, len %d\n", len);
3252 /* two byte checksum ends the packet, not included in len */
3253 for (w = 0; w < len + 2;) {
3255 val = dsi_read_reg(dsidev,
3256 DSI_VC_SHORT_PACKET_HEADER(channel));
3257 if (dsi->debug_read)
3258 DSSDBG("\t\t%02x %02x %02x %02x\n",
					val & 0xff,
					(val >> 8) & 0xff,
					(val >> 16) & 0xff,
3262					(val >> 24) & 0xff);
3264 for (b = 0; b < 4; ++b) {
3266 buf[w] = (val >> (b * 8)) & 0xff;
3267 /* we discard the 2 byte checksum */
3274 DSSERR("\tunknown datatype 0x%02x\n", dt);
3281	DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n",
			channel, dcs_cmd);
3286 EXPORT_SYMBOL(dsi_vc_dcs_read);
3288 int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3293 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
3303 EXPORT_SYMBOL(dsi_vc_dcs_read_1);
3305 int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3306 u8 *data1, u8 *data2)
3311 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
3324 EXPORT_SYMBOL(dsi_vc_dcs_read_2);
3326 int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
		u16 len)
{
3329	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3331	return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
			len, 0);
}
3334 EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
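/*
 * ULPS entry: all four VCs are synced and disabled, the lanes are checked
 * to be neither HS nor LP busy, TxRequestEsc/TxUlpsClk are asserted via
 * DSI_COMPLEXIO_CFG2, and the code waits (1 s timeout) for the
 * ULPSACTIVENOT interrupt before putting the complex I/O into the ULPS
 * power state and disabling the interface.
 */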
3336 static int dsi_enter_ulps(struct platform_device *dsidev)
3338 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3339 DECLARE_COMPLETION_ONSTACK(completion);
3344 WARN_ON(!dsi_bus_is_locked(dsidev));
3346 WARN_ON(dsi->ulps_enabled);
3348 if (dsi->ulps_enabled)
3351 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3352 DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
3356 dsi_sync_vc(dsidev, 0);
3357 dsi_sync_vc(dsidev, 1);
3358 dsi_sync_vc(dsidev, 2);
3359 dsi_sync_vc(dsidev, 3);
3361 dsi_force_tx_stop_mode_io(dsidev);
3363 dsi_vc_enable(dsidev, 0, false);
3364 dsi_vc_enable(dsidev, 1, false);
3365 dsi_vc_enable(dsidev, 2, false);
3366 dsi_vc_enable(dsidev, 3, false);
3368 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3369 DSSERR("HS busy when enabling ULPS\n");
3373 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3374 DSSERR("LP busy when enabling ULPS\n");
3378 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3379 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3383 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3384 /* LANEx_ULPS_SIG2 */
3385 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
3388 if (wait_for_completion_timeout(&completion,
3389 msecs_to_jiffies(1000)) == 0) {
3390 DSSERR("ULPS enable timeout\n");
3395 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3396 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3398 /* Reset LANEx_ULPS_SIG2 */
3399 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
3402 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3404 dsi_if_enable(dsidev, false);
3406 dsi->ulps_enabled = true;
3411 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3412 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
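/*
 * The protocol engine timeouts below are a 13-bit tick counter plus x4/x8
 * and x16 prescaler enables, so the effective timeout is
 * ticks * (x16 ? 16 : 1) * (x4_or_x8 ? 4_or_8 : 1) periods of DSI_FCK
 * (TxByteClkHS for the HS TX timeout). Purely as an illustration,
 * assuming a 38.4 MHz functional clock, ticks = 0x1fff with both
 * prescalers set gives 8191 * 64 = 524224 ticks, roughly 13.7 ms.
 */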
3416 static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3417 unsigned ticks, bool x4, bool x16)
3420 unsigned long total_ticks;
3423 BUG_ON(ticks > 0x1fff);
3425 /* ticks in DSI_FCK */
3426 fck = dsi_fclk_rate(dsidev);
3428 r = dsi_read_reg(dsidev, DSI_TIMING2);
3429 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
3430 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
3431 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
3432 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
3433 dsi_write_reg(dsidev, DSI_TIMING2, r);
3435 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3437	DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
3439			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3440 (total_ticks * 1000) / (fck / 1000 / 1000));
3443 static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
		bool x8, bool x16)
3447 unsigned long total_ticks;
3450 BUG_ON(ticks > 0x1fff);
3452 /* ticks in DSI_FCK */
3453 fck = dsi_fclk_rate(dsidev);
3455 r = dsi_read_reg(dsidev, DSI_TIMING1);
3456 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
3457 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
3458 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
3459 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
3460 dsi_write_reg(dsidev, DSI_TIMING1, r);
3462 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3464	DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
3466			ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3467 (total_ticks * 1000) / (fck / 1000 / 1000));
3470 static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3471 unsigned ticks, bool x4, bool x16)
3474 unsigned long total_ticks;
3477 BUG_ON(ticks > 0x1fff);
3479 /* ticks in DSI_FCK */
3480 fck = dsi_fclk_rate(dsidev);
3482 r = dsi_read_reg(dsidev, DSI_TIMING1);
3483 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
3484 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
3485 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
3486 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
3487 dsi_write_reg(dsidev, DSI_TIMING1, r);
3489 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3491	DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
3493			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3494 (total_ticks * 1000) / (fck / 1000 / 1000));
3497 static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3498 unsigned ticks, bool x4, bool x16)
3501 unsigned long total_ticks;
3504 BUG_ON(ticks > 0x1fff);
3506 /* ticks in TxByteClkHS */
3507 fck = dsi_get_txbyteclkhs(dsidev);
3509 r = dsi_read_reg(dsidev, DSI_TIMING2);
3510 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
3511 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
3512 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
3513 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
3514 dsi_write_reg(dsidev, DSI_TIMING2, r);
3516 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3518	DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
3520			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3521 (total_ticks * 1000) / (fck / 1000 / 1000));
3523 static int dsi_proto_config(struct omap_dss_device *dssdev)
3525 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3529	dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);
3534	dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);
3539 /* XXX what values for the timeouts? */
3540 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
3541 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
3542 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3543 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3545 switch (dssdev->ctrl.pixel_size) {
3559 r = dsi_read_reg(dsidev, DSI_CTRL);
3560 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
3561 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
3562 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
3563 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
3564 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
3565 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
3566 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
3567 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
3568 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
3569 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
3570 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
3571 /* DCS_CMD_CODE, 1=start, 0=continue */
3572 r = FLD_MOD(r, 0, 25, 25);
3575 dsi_write_reg(dsidev, DSI_CTRL, r);
3577 dsi_vc_initial_config(dsidev, 0);
3578 dsi_vc_initial_config(dsidev, 1);
3579 dsi_vc_initial_config(dsidev, 2);
3580 dsi_vc_initial_config(dsidev, 3);
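/*
 * dsi_proto_timings() derives the protocol timings from the PHY timings
 * programmed earlier: the ths and tclk values are read back from
 * DSI_DSIPHY_CFG0..2, DDR_CLK_PRE/POST are computed from them and written
 * to DSI_CLK_TIMING, and the HS mode enter/exit latencies go to
 * DSI_VM_TIMING7. The DIV_ROUND_UP(..., 4) steps convert DDR clock cycles
 * to TxByteClkHS cycles, which differ by a factor of four.
 */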
3585 static void dsi_proto_timings(struct omap_dss_device *dssdev)
3587 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3588 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3589 unsigned tclk_pre, tclk_post;
3590 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
3591 unsigned ths_trail, ths_exit;
3592 unsigned ddr_clk_pre, ddr_clk_post;
3593 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
3597 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3598 ths_prepare = FLD_GET(r, 31, 24);
3599 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
3600 ths_zero = ths_prepare_ths_zero - ths_prepare;
3601 ths_trail = FLD_GET(r, 15, 8);
3602 ths_exit = FLD_GET(r, 7, 0);
3604 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3605 tlpx = FLD_GET(r, 22, 16) * 2;
3606 tclk_trail = FLD_GET(r, 15, 8);
3607 tclk_zero = FLD_GET(r, 7, 0);
3609 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
3610 tclk_prepare = FLD_GET(r, 7, 0);
3614 /* min 60ns + 52*UI */
3615 tclk_post = ns2ddr(dsidev, 60) + 26;
3617 ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
3619 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
3621 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
3623 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
3624 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
3626 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3627 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
3628 r = FLD_MOD(r, ddr_clk_post, 7, 0);
3629 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
3631	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
			ddr_clk_pre, ddr_clk_post);
3635 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
3636 DIV_ROUND_UP(ths_prepare, 4) +
3637 DIV_ROUND_UP(ths_zero + 3, 4);
3639 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
3641 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
3642 FLD_VAL(exit_hs_mode_lat, 15, 0);
3643 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
3645 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3646 enter_hs_mode_lat, exit_hs_mode_lat);
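/*
 * DSI_PUSH()/DSI_FLUSH() below implement the CPU (L4) pixel path: bytes
 * are accumulated into a 32-bit word (__dsi_cv, byte index __dsi_cb) and
 * every full word is written to the long-packet payload register. The
 * intended pattern, as a sketch (a matching header is assumed to have
 * been sent with dsi_vc_write_long_header() first):
 *
 *	DSI_DECL_VARS;
 *	DSI_PUSH(dsidev, 0, dcs_cmd);
 *	DSI_PUSH(dsidev, 0, r);
 *	DSI_PUSH(dsidev, 0, g);
 *	DSI_PUSH(dsidev, 0, b);
 *	DSI_FLUSH(dsidev, 0);
 */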
3650 #define DSI_DECL_VARS \
3651 int __dsi_cb = 0; u32 __dsi_cv = 0;
3653 #define DSI_FLUSH(dsidev, ch) \
3654 if (__dsi_cb > 0) { \
3655 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
3656 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
3657 __dsi_cb = __dsi_cv = 0; \
3660 #define DSI_PUSH(dsidev, ch, data) \
3662 __dsi_cv |= (data) << (__dsi_cb * 8); \
3663 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
3664 if (++__dsi_cb > 3) \
3665 DSI_FLUSH(dsidev, ch); \
3668 static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3669 int x, int y, int w, int h)
3671 /* Note: supports only 24bit colors in 32bit container */
3672 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3673 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3675 int fifo_stalls = 0;
3676 int max_dsi_packet_size;
3677 int max_data_per_packet;
3678 int max_pixels_per_packet;
3680 int bytespp = dssdev->ctrl.pixel_size / 8;
3686 struct omap_overlay *ovl;
3690	DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
			x, y, w, h);
3693 ovl = dssdev->manager->overlays[0];
3695 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
3698 if (dssdev->ctrl.pixel_size != 24)
3701 scr_width = ovl->info.screen_width;
3702 data = ovl->info.vaddr;
3704 start_offset = scr_width * y + x;
3705 horiz_inc = scr_width - w;
3708	/* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
	 * in fifo */
3711 /* When using CPU, max long packet size is TX buffer size */
3712 max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
3714	/* we seem to get better perf if we divide the tx fifo in half,
3715	   and while one half is being sent, we fill the other half
3716	   max_dsi_packet_size /= 2; */
3718 max_data_per_packet = max_dsi_packet_size - 4 - 1;
3720 max_pixels_per_packet = max_data_per_packet / bytespp;
3722 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
3724 pixels_left = w * h;
3726 DSSDBG("total pixels %d\n", pixels_left);
3728 data += start_offset;
3730 while (pixels_left > 0) {
3731 /* 0x2c = write_memory_start */
3732 /* 0x3c = write_memory_continue */
3733 u8 dcs_cmd = first ? 0x2c : 0x3c;
3739 /* using fifo not empty */
3740 /* TX_FIFO_NOT_EMPTY */
3741 while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
3743 if (fifo_stalls > 0xfffff) {
3744				DSSERR("fifo stalls overflow, pixels left %d\n",
						pixels_left);
3746 dsi_if_enable(dsidev, 0);
3752 /* using fifo emptiness */
3753 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
3754 max_dsi_packet_size) {
3756 if (fifo_stalls > 0xfffff) {
3757				DSSERR("fifo stalls overflow, pixels left %d\n",
						pixels_left);
3759 dsi_if_enable(dsidev, 0);
3764 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
3765 7, 0) + 1) * 4 == 0) {
3767 if (fifo_stalls > 0xfffff) {
3768				DSSERR("fifo stalls overflow, pixels left %d\n",
						pixels_left);
3770 dsi_if_enable(dsidev, 0);
3775 pixels = min(max_pixels_per_packet, pixels_left);
3777 pixels_left -= pixels;
3779 dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
3780 1 + pixels * bytespp, 0);
3782 DSI_PUSH(dsidev, 0, dcs_cmd);
3784 while (pixels-- > 0) {
3785 u32 pix = __raw_readl(data++);
3787 DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
3788 DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
3789 DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);
3792 if (current_x == x+w) {
3798 DSI_FLUSH(dsidev, 0);
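/*
 * DISPC update path: the frame goes out through the video port rather
 * than the CPU. The transfer is split into long packets of whole lines
 * sized to the DSI line buffer, TE_SIZE in DSI_VC_TE is programmed with
 * the total byte count, and the transfer starts either immediately
 * (TE_START) or on the next tearing-effect event (TE_EN, with a BTA so
 * the panel can send TE). A 250 ms framedone timeout work is scheduled
 * as a safety net.
 */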
3804 static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3805 u16 x, u16 y, u16 w, u16 h)
3807 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3808 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3813 unsigned packet_payload;
3814 unsigned packet_len;
3817 const unsigned channel = dsi->update_channel;
3818 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3820	DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
			x, y, w, h);
3823 dsi_vc_config_vp(dsidev, channel);
3825 bytespp = dssdev->ctrl.pixel_size / 8;
3826 bytespl = w * bytespp;
3827 bytespf = bytespl * h;
3829 /* NOTE: packet_payload has to be equal to N * bytespl, where N is
3830 * number of lines in a packet. See errata about VP_CLK_RATIO */
3832 if (bytespf < line_buf_size)
3833 packet_payload = bytespf;
3835 packet_payload = (line_buf_size) / bytespl * bytespl;
3837 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
3838 total_len = (bytespf / packet_payload) * packet_len;
3840 if (bytespf % packet_payload)
3841 total_len += (bytespf % packet_payload) + 1;
3843 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3844 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3846	dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
			packet_len, 0);
3849 if (dsi->te_enabled)
3850 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3852 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3853 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3855 /* We put SIDLEMODE to no-idle for the duration of the transfer,
3856 * because DSS interrupts are not capable of waking up the CPU and the
3857 * framedone interrupt could be delayed for quite a long time. I think
3858 * the same goes for any DSS interrupts, but for some reason I have not
3859	 * seen the problem anywhere else than here.
	 */
3861 dispc_disable_sidle();
3863 dsi_perf_mark_start(dsidev);
3865 r = schedule_delayed_work(&dsi->framedone_timeout_work,
3866 msecs_to_jiffies(250));
3869 dss_start_update(dssdev);
3871 if (dsi->te_enabled) {
3872 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3873 * for TE is longer than the timer allows */
3874 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3876 dsi_vc_send_bta(dsidev, channel);
3878 #ifdef DSI_CATCH_MISSING_TE
3879		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
3884 #ifdef DSI_CATCH_MISSING_TE
3885 static void dsi_te_timeout(unsigned long arg)
3887	DSSERR("TE not received for 250ms!\n");
}
#endif
3891 static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3893 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3895 /* SIDLEMODE back to smart-idle */
3896 dispc_enable_sidle();
3898 if (dsi->te_enabled) {
3899 /* enable LP_RX_TO again after the TE */
3900 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3903 dsi->framedone_callback(error, dsi->framedone_data);
3906 dsi_perf_show(dsidev, "DISPC");
3909 static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3911 struct dsi_data *dsi = container_of(work, struct dsi_data,
3912 framedone_timeout_work.work);
3913 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3914 * 250ms which would conflict with this timeout work. What should be
3915 * done is first cancel the transfer on the HW, and then cancel the
3916 * possibly scheduled framedone work. However, cancelling the transfer
3917	 * on the HW is buggy, and would probably require resetting the whole
	 * DSI */
3920 DSSERR("Framedone not received for 250ms!\n");
3922 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3925 static void dsi_framedone_irq_callback(void *data, u32 mask)
3927 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3928 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3929 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3931 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3932 * turns itself off. However, DSI still has the pixels in its buffers,
3933	 * and is sending the data.
	 */
3936 __cancel_delayed_work(&dsi->framedone_timeout_work);
3938 dsi_handle_framedone(dsidev, 0);
3940 #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3941	dispc_fake_vsync_irq();
#endif
}
3945 int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3946 u16 *x, u16 *y, u16 *w, u16 *h,
3947 bool enlarge_update_area)
3949 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3952 dssdev->driver->get_resolution(dssdev, &dw, &dh);
3954 if (*x > dw || *y > dh)
3966 if (*w == 0 || *h == 0)
3969 dsi_perf_mark_setup(dsidev);
3971 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3972 dss_setup_partial_planes(dssdev, x, y, w, h,
3973 enlarge_update_area);
3974 dispc_set_lcd_size(dssdev->manager->id, *w, *h);
3979 EXPORT_SYMBOL(omap_dsi_prepare_update);
3981 int omap_dsi_update(struct omap_dss_device *dssdev,
3983 u16 x, u16 y, u16 w, u16 h,
3984 void (*callback)(int, void *), void *data)
3986 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3987 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3989 dsi->update_channel = channel;
3991 /* OMAP DSS cannot send updates of odd widths.
3992 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
3993 * here to make sure we catch erroneous updates. Otherwise we'll only
3994 * see rather obscure HW error happening, as DSS halts. */
3997 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3998 dsi->framedone_callback = callback;
3999 dsi->framedone_data = data;
4001 dsi->update_region.x = x;
4002 dsi->update_region.y = y;
4003 dsi->update_region.w = w;
4004 dsi->update_region.h = h;
4005 dsi->update_region.device = dssdev;
4007 dsi_update_screen_dispc(dssdev, x, y, w, h);
4011 r = dsi_update_screen_l4(dssdev, x, y, w, h);
4015 dsi_perf_show(dsidev, "L4");
4021 EXPORT_SYMBOL(omap_dsi_update);
4025 static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4030 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4031 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4033 r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
4036 DSSERR("can't get FRAMEDONE irq\n");
4040 dispc_set_lcd_display_type(dssdev->manager->id,
4041 OMAP_DSS_LCD_DISPLAY_TFT);
4043 dispc_set_parallel_interface_mode(dssdev->manager->id,
4044 OMAP_DSS_PARALLELMODE_DSI);
4045 dispc_enable_fifohandcheck(dssdev->manager->id, 1);
4047 dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
4050 struct omap_video_timings timings = {
4059 dispc_set_lcd_timings(dssdev->manager->id, &timings);
4065 static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
4069 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4070 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4072 omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
4076 static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4078 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4079 struct dsi_clock_info cinfo;
4082 /* we always use DSS_CLK_SYSCK as input clock */
4083 cinfo.use_sys_clk = true;
4084 cinfo.regn = dssdev->clocks.dsi.regn;
4085 cinfo.regm = dssdev->clocks.dsi.regm;
4086 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4087 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4088 r = dsi_calc_clock_rates(dssdev, &cinfo);
4090 DSSERR("Failed to calc dsi clocks\n");
4094 r = dsi_pll_set_clock_div(dsidev, &cinfo);
4096 DSSERR("Failed to set dsi clocks\n");
4103 static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4105 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4106 struct dispc_clock_info dispc_cinfo;
4108 unsigned long long fck;
4110 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4112 dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
4113 dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
4115 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4117 DSSERR("Failed to calc dispc clocks\n");
4121 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
4123 DSSERR("Failed to set dispc clocks\n");
4130 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4132 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4133 int dsi_module = dsi_get_dsidev_id(dsidev);
4136 r = dsi_pll_init(dsidev, true, true);
4140 r = dsi_configure_dsi_clocks(dssdev);
4144 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4145 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
4146 dss_select_lcd_clk_source(dssdev->manager->id,
4147 dssdev->clocks.dispc.channel.lcd_clk_src);
4151 r = dsi_configure_dispc_clocks(dssdev);
4155 r = dsi_cio_init(dssdev);
4159 _dsi_print_reset_status(dsidev);
4161 dsi_proto_timings(dssdev);
4162 dsi_set_lp_clk_divisor(dssdev);
4165 _dsi_print_reset_status(dsidev);
4167 r = dsi_proto_config(dssdev);
4171 /* enable interface */
4172 dsi_vc_enable(dsidev, 0, 1);
4173 dsi_vc_enable(dsidev, 1, 1);
4174 dsi_vc_enable(dsidev, 2, 1);
4175 dsi_vc_enable(dsidev, 3, 1);
4176 dsi_if_enable(dsidev, 1);
4177 dsi_force_tx_stop_mode_io(dsidev);
4181 dsi_cio_uninit(dsidev);
4183 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4184 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4186 dsi_pll_uninit(dsidev, true);
4191 static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4192 bool disconnect_lanes, bool enter_ulps)
4194 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4195 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4196 int dsi_module = dsi_get_dsidev_id(dsidev);
4198 if (enter_ulps && !dsi->ulps_enabled)
4199 dsi_enter_ulps(dsidev);
4201 /* disable interface */
4202 dsi_if_enable(dsidev, 0);
4203 dsi_vc_enable(dsidev, 0, 0);
4204 dsi_vc_enable(dsidev, 1, 0);
4205 dsi_vc_enable(dsidev, 2, 0);
4206 dsi_vc_enable(dsidev, 3, 0);
4208 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4209 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4210 dsi_cio_uninit(dsidev);
4211 dsi_pll_uninit(dsidev, disconnect_lanes);
4214 static int dsi_core_init(struct platform_device *dsidev)
4217 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
4220 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
4222 /* SIDLEMODE smart-idle */
4223 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
4225 _dsi_initialize_irq(dsidev);
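/*
 * Display enable sequence (the caller holds the DSI bus lock): start the
 * omap_dss device, enable the DSI clocks, soft-reset and re-init the DSI
 * core, then bring up the DISPC side (FRAMEDONE ISR, LCD config) and the
 * DSI side (PLL, clock sources, complex I/O, protocol engine, virtual
 * channels), unwinding in reverse order on error.
 */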
4230 int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4232 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4233 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4236 DSSDBG("dsi_display_enable\n");
4238 WARN_ON(!dsi_bus_is_locked(dsidev));
4240 mutex_lock(&dsi->lock);
4242 r = omap_dss_start_device(dssdev);
4244 DSSERR("failed to start device\n");
4249 dsi_enable_pll_clock(dsidev, 1);
4251 r = _dsi_reset(dsidev);
4255 dsi_core_init(dsidev);
4257 r = dsi_display_init_dispc(dssdev);
4261 r = dsi_display_init_dsi(dssdev);
4265 mutex_unlock(&dsi->lock);
4270 dsi_display_uninit_dispc(dssdev);
4273 dsi_enable_pll_clock(dsidev, 0);
4274 omap_dss_stop_device(dssdev);
4276 mutex_unlock(&dsi->lock);
4277 DSSDBG("dsi_display_enable FAILED\n");
4280 EXPORT_SYMBOL(omapdss_dsi_display_enable);
4282 void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4283 bool disconnect_lanes, bool enter_ulps)
4285 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4286 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4288 DSSDBG("dsi_display_disable\n");
4290 WARN_ON(!dsi_bus_is_locked(dsidev));
4292 mutex_lock(&dsi->lock);
4294 dsi_display_uninit_dispc(dssdev);
4296 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
4299 dsi_enable_pll_clock(dsidev, 0);
4301 omap_dss_stop_device(dssdev);
4303 mutex_unlock(&dsi->lock);
4305 EXPORT_SYMBOL(omapdss_dsi_display_disable);
4307 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4309 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4310 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4312 dsi->te_enabled = enable;
4315 EXPORT_SYMBOL(omapdss_dsi_enable_te);
4317 void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4318 u32 fifo_size, enum omap_burst_size *burst_size,
4319 u32 *fifo_low, u32 *fifo_high)
4321 unsigned burst_size_bytes;
4323 *burst_size = OMAP_DSS_BURST_16x32;
4324 burst_size_bytes = 16 * 32 / 8;
4326 *fifo_high = fifo_size - burst_size_bytes;
4327 *fifo_low = fifo_size - burst_size_bytes * 2;
4330 int dsi_init_display(struct omap_dss_device *dssdev)
4332 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4333 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4334 int dsi_module = dsi_get_dsidev_id(dsidev);
4336 DSSDBG("DSI init\n");
4338 /* XXX these should be figured out dynamically */
4339 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
4340 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
4342 if (dsi->vdds_dsi_reg == NULL) {
4343 struct regulator *vdds_dsi;
4345 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4347 if (IS_ERR(vdds_dsi)) {
4348 DSSERR("can't get VDDS_DSI regulator\n");
4349 return PTR_ERR(vdds_dsi);
4352 dsi->vdds_dsi_reg = vdds_dsi;
4355 if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
4356 DSSERR("DSI%d can't support more than %d data lanes\n",
4357 dsi_module + 1, dsi->num_data_lanes);
4364 int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4366 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4367 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4370 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4371 if (!dsi->vc[i].dssdev) {
4372 dsi->vc[i].dssdev = dssdev;
4378 DSSERR("cannot get VC for display %s", dssdev->name);
4381 EXPORT_SYMBOL(omap_dsi_request_vc);
4383 int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4385 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4386 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4388 if (vc_id < 0 || vc_id > 3) {
4389 DSSERR("VC ID out of range\n");
4393 if (channel < 0 || channel > 3) {
4394 DSSERR("Virtual Channel out of range\n");
4398 if (dsi->vc[channel].dssdev != dssdev) {
4399 DSSERR("Virtual Channel not allocated to display %s\n",
4404 dsi->vc[channel].vc_id = vc_id;
4408 EXPORT_SYMBOL(omap_dsi_set_vc_id);
4410 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4412 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4413 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4415 if ((channel >= 0 && channel <= 3) &&
4416 dsi->vc[channel].dssdev == dssdev) {
4417 dsi->vc[channel].dssdev = NULL;
4418 dsi->vc[channel].vc_id = 0;
4421 EXPORT_SYMBOL(omap_dsi_release_vc);
4423 void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
4425 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
4426 DSSERR("%s (%s) not active\n",
4427 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
4428 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
4431 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
4433 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
4434 DSSERR("%s (%s) not active\n",
4435 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
4436 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
4439 static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4441 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4443 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
4444 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
4445 dsi->regm_dispc_max =
4446 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
4447 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4448 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4449 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4450 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4453 static int dsi_init(struct platform_device *dsidev)
4455 struct omap_display_platform_data *dss_plat_data;
4456 struct omap_dss_board_info *board_info;
4458 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
4459 struct resource *dsi_mem;
4460 struct dsi_data *dsi;
4462 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4469 dsi_pdev_map[dsi_module] = dsidev;
4470 dev_set_drvdata(&dsidev->dev, dsi);
4472 dss_plat_data = dsidev->dev.platform_data;
4473 board_info = dss_plat_data->board_data;
4474 dsi->dsi_mux_pads = board_info->dsi_mux_pads;
4476 spin_lock_init(&dsi->irq_lock);
4477 spin_lock_init(&dsi->errors_lock);
4480 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4481 spin_lock_init(&dsi->irq_stats_lock);
4482	dsi->irq_stats.last_reset = jiffies;
#endif
4485 mutex_init(&dsi->lock);
4486 sema_init(&dsi->bus_lock, 1);
4488 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
4489 dsi_framedone_timeout_work_callback);
4491 #ifdef DSI_CATCH_MISSING_TE
4492 init_timer(&dsi->te_timer);
4493 dsi->te_timer.function = dsi_te_timeout;
4494	dsi->te_timer.data = 0;
#endif
4496 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
4498 DSSERR("can't get IORESOURCE_MEM DSI\n");
4502 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
4504 DSSERR("can't ioremap DSI\n");
4508 dsi->irq = platform_get_irq(dsi->pdev, 0);
4510 DSSERR("platform_get_irq failed\n");
4515 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
4516 dev_name(&dsidev->dev), dsi->pdev);
4518 DSSERR("request_irq failed\n");
4522 /* DSI VCs initialization */
4523 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4524 dsi->vc[i].mode = DSI_VC_MODE_L4;
4525 dsi->vc[i].dssdev = NULL;
4526 dsi->vc[i].vc_id = 0;
4529 dsi_calc_clock_param_ranges(dsidev);
4533 rev = dsi_read_reg(dsidev, DSI_REVISION);
4534 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
4535 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
4537 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
4550 static void dsi_exit(struct platform_device *dsidev)
4552 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4554 if (dsi->vdds_dsi_reg != NULL) {
4555 if (dsi->vdds_dsi_enabled) {
4556 regulator_disable(dsi->vdds_dsi_reg);
4557 dsi->vdds_dsi_enabled = false;
4560 regulator_put(dsi->vdds_dsi_reg);
4561 dsi->vdds_dsi_reg = NULL;
4564 free_irq(dsi->irq, dsi->pdev);
4569 DSSDBG("omap_dsi_exit\n");
4572 /* DSI1 HW IP initialisation */
4573 static int omap_dsi1hw_probe(struct platform_device *dsidev)
4577 r = dsi_init(dsidev);
4579 DSSERR("Failed to initialize DSI\n");
4586 static int omap_dsi1hw_remove(struct platform_device *dsidev)
4588 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4591 WARN_ON(dsi->scp_clk_refcount > 0);
4595 static struct platform_driver omap_dsi1hw_driver = {
4596 .probe = omap_dsi1hw_probe,
4597 .remove = omap_dsi1hw_remove,
4599 .name = "omapdss_dsi1",
4600 .owner = THIS_MODULE,
4604 int dsi_init_platform_driver(void)
4606 return platform_driver_register(&omap_dsi1hw_driver);
4609 void dsi_uninit_platform_driver(void)
4611 return platform_driver_unregister(&omap_dsi1hw_driver);