/*
 * linux/drivers/video/omap2/dss/dss.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * Some code and ideas taken from drivers/video/omap/ driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
23 #define DSS_SUBSYS_NAME "DSS"
25 #include <linux/kernel.h>
27 #include <linux/err.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/seq_file.h>
31 #include <linux/clk.h>
33 #include <plat/display.h>
36 #define DSS_BASE 0x48050000
38 #define DSS_SZ_REGS SZ_512
44 #define DSS_REG(idx) ((const struct dss_reg) { idx })
46 #define DSS_REVISION DSS_REG(0x0000)
47 #define DSS_SYSCONFIG DSS_REG(0x0010)
48 #define DSS_SYSSTATUS DSS_REG(0x0014)
49 #define DSS_IRQSTATUS DSS_REG(0x0018)
50 #define DSS_CONTROL DSS_REG(0x0040)
51 #define DSS_SDI_CONTROL DSS_REG(0x0044)
52 #define DSS_PLL_CONTROL DSS_REG(0x0048)
53 #define DSS_SDI_STATUS DSS_REG(0x005C)
55 #define REG_GET(idx, start, end) \
56 FLD_GET(dss_read_reg(idx), start, end)
58 #define REG_FLD_MOD(idx, val, start, end) \
59 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
/* NOTE(review): these are the fields of the file-scope "static struct {...} dss"
 * state container; the struct's opening/closing lines are missing from this
 * extract (along with the void __iomem *base member the accessors below use) —
 * restore them before building. */
64 struct clk *dpll4_m4_ck;	/* DPLL4 M4 output feeding the DSS fclk (OMAP3 only; see dss_init) */
66 unsigned long cache_req_pck;	/* pixel clock last requested from dss_calc_clock_div() */
67 unsigned long cache_prate;	/* dpll4 parent rate when the divider cache was filled */
68 struct dss_clock_info cache_dss_cinfo;	/* cached DSS divider result */
69 struct dispc_clock_info cache_dispc_cinfo;	/* cached DISPC divider result */
71 enum dss_clk_source dsi_clk_source;	/* currently selected DSI fclk source */
72 enum dss_clk_source dispc_clk_source;	/* currently selected DISPC fclk source */
74 u32 ctx[DSS_SZ_REGS / sizeof(u32)];	/* register shadow for save/restore over sleep */
77 static int _omap_dss_wait_reset(void);
79 static inline void dss_write_reg(const struct dss_reg idx, u32 val)
81 __raw_writel(val, dss.base + idx.idx);
84 static inline u32 dss_read_reg(const struct dss_reg idx)
86 return __raw_readl(dss.base + idx.idx);
/* Context save/restore helpers: SR() shadows a register into dss.ctx[],
 * RR() writes the shadowed value back. The shadow slot is indexed by the
 * register's byte offset. NOTE(review): the #define lines were dropped in
 * this extract; macro names reconstructed — confirm against the original. */
#define SR(reg) \
	dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
#define RR(reg) \
	dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
/* Save the DSS register context into dss.ctx[] so it can be restored after
 * an off-mode sleep. NOTE(review): the body is truncated in this extract —
 * the SR(...) save statements, the handling under the 24xx check and the
 * SDI #ifdef, and the closing brace are missing. */
94 void dss_save_context(void)
96 if (cpu_is_omap24xx())
102 #ifdef CONFIG_OMAP2_DSS_SDI
/* Restore the DSS register context saved by dss_save_context(). Waits for
 * the module to come out of reset first; logs an error on timeout but
 * continues. NOTE(review): truncated — the RR(...) restore statements and
 * the SDI #ifdef body/closing brace are missing from this extract. */
108 void dss_restore_context(void)
110 if (_omap_dss_wait_reset())
111 DSSERR("DSS not coming out of reset after sleep\n");
116 #ifdef CONFIG_OMAP2_DSS_SDI
125 void dss_sdi_init(u8 datapairs)
129 BUG_ON(datapairs > 3 || datapairs < 1);
131 l = dss_read_reg(DSS_SDI_CONTROL);
132 l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
133 l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
134 l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
135 dss_write_reg(DSS_SDI_CONTROL, l);
137 l = dss_read_reg(DSS_PLL_CONTROL);
138 l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
139 l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
140 l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
141 dss_write_reg(DSS_PLL_CONTROL, l);
144 int dss_sdi_enable(void)
146 unsigned long timeout;
148 dispc_pck_free_enable(1);
151 REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
152 udelay(1); /* wait 2x PCLK */
155 REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
157 /* Waiting for PLL lock request to complete */
158 timeout = jiffies + msecs_to_jiffies(500);
159 while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
160 if (time_after_eq(jiffies, timeout)) {
161 DSSERR("PLL lock request timed out\n");
166 /* Clearing PLL_GO bit */
167 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
169 /* Waiting for PLL to lock */
170 timeout = jiffies + msecs_to_jiffies(500);
171 while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
172 if (time_after_eq(jiffies, timeout)) {
173 DSSERR("PLL lock timed out\n");
178 dispc_lcd_enable_signal(1);
180 /* Waiting for SDI reset to complete */
181 timeout = jiffies + msecs_to_jiffies(500);
182 while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
183 if (time_after_eq(jiffies, timeout)) {
184 DSSERR("SDI reset timed out\n");
192 dispc_lcd_enable_signal(0);
195 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
197 dispc_pck_free_enable(0);
202 void dss_sdi_disable(void)
204 dispc_lcd_enable_signal(0);
206 dispc_pck_free_enable(0);
209 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
212 void dss_dump_clocks(struct seq_file *s)
214 unsigned long dpll4_ck_rate;
215 unsigned long dpll4_m4_ck_rate;
217 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
219 dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
220 dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
222 seq_printf(s, "- DSS -\n");
224 seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
226 if (cpu_is_omap3630())
227 seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
229 dpll4_ck_rate / dpll4_m4_ck_rate,
230 dss_clk_get_rate(DSS_CLK_FCK1));
232 seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
234 dpll4_ck_rate / dpll4_m4_ck_rate,
235 dss_clk_get_rate(DSS_CLK_FCK1));
237 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
240 void dss_dump_regs(struct seq_file *s)
242 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
244 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
246 DUMPREG(DSS_REVISION);
247 DUMPREG(DSS_SYSCONFIG);
248 DUMPREG(DSS_SYSSTATUS);
249 DUMPREG(DSS_IRQSTATUS);
250 DUMPREG(DSS_CONTROL);
251 DUMPREG(DSS_SDI_CONTROL);
252 DUMPREG(DSS_PLL_CONTROL);
253 DUMPREG(DSS_SDI_STATUS);
255 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
259 void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
263 BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
264 clk_src != DSS_SRC_DSS1_ALWON_FCLK);
266 b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
268 if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
269 dsi_wait_dsi1_pll_active();
271 REG_FLD_MOD(DSS_CONTROL, b, 0, 0); /* DISPC_CLK_SWITCH */
273 dss.dispc_clk_source = clk_src;
276 void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
280 BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
281 clk_src != DSS_SRC_DSS1_ALWON_FCLK);
283 b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
285 if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
286 dsi_wait_dsi2_pll_active();
288 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
290 dss.dsi_clk_source = clk_src;
293 enum dss_clk_source dss_get_dispc_clk_source(void)
295 return dss.dispc_clk_source;
298 enum dss_clk_source dss_get_dsi_clk_source(void)
300 return dss.dsi_clk_source;
303 /* calculate clock rates using dividers in cinfo */
/* Fills cinfo->fck from the dpll4 parent rate and cinfo->fck_div; the
 * divider upper bound is 32 on OMAP3630 and 16 otherwise.
 * NOTE(review): truncated — the -EINVAL return for an out-of-range or zero
 * fck_div, the cpu guard around the prate read, the non-3630 "* 2" branch
 * (if present in this version), and the final return are missing from this
 * extract. */
304 int dss_calc_clock_rates(struct dss_clock_info *cinfo)
308 if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
312 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
314 cinfo->fck = prate / cinfo->fck_div;
/* Apply the DSS fclk divider from cinfo by setting the dpll4_m4_ck rate
 * (OMAP3 only; the rate is prate / fck_div). Logs the resulting fck.
 * NOTE(review): truncated — the local declarations, the error check on
 * clk_set_rate()'s result, the closing of the 34xx branch and the return
 * are missing from this extract. */
319 int dss_set_clock_div(struct dss_clock_info *cinfo)
324 if (cpu_is_omap34xx()) {
325 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
326 DSSDBG("dpll4_m4 = %ld\n", prate);
328 r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
333 DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
/* Read back the current DSS fclk and derive the divider in use. On OMAP3
 * the divider is prate / fck (3630) or prate / (fck / 2) (other parts,
 * where the M4 output is doubled). NOTE(review): truncated — the prate
 * local declaration, the else keyword before the non-3630 branch, the
 * non-34xx fck_div fallback and the return are missing from this extract. */
338 int dss_get_clock_div(struct dss_clock_info *cinfo)
340 cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
342 if (cpu_is_omap34xx()) {
344 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
345 if (cpu_is_omap3630())
346 cinfo->fck_div = prate / (cinfo->fck);
348 cinfo->fck_div = prate / (cinfo->fck / 2);
356 unsigned long dss_get_dpll4_rate(void)
358 if (cpu_is_omap34xx())
359 return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
/* Find DSS + DISPC clock dividers that best approximate req_pck.
 * Results are returned through dss_cinfo/dispc_cinfo and cached (keyed on
 * req_pck and the dpll4 parent rate / current fck) so repeated requests
 * for the same mode are cheap. On OMAP2 the fck is fixed; on OMAP3 every
 * fck divider is scanned for the closest achievable pixel clock.
 * NOTE(review): heavily truncated in this extract — local declarations
 * (fck, fck_div, prate, min_fck_per_pck, match flag), several returns,
 * loop/branch closings, the "goto retry" style fallback after the
 * constraint-off message, and the final return are missing. */
364 int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
365 struct dss_clock_info *dss_cinfo,
366 struct dispc_clock_info *dispc_cinfo)
369 struct dss_clock_info best_dss;
370 struct dispc_clock_info best_dispc;
/* Cache hit: same requested pck and unchanged clock base. */
379 prate = dss_get_dpll4_rate();
381 fck = dss_clk_get_rate(DSS_CLK_FCK1);
382 if (req_pck == dss.cache_req_pck &&
383 ((cpu_is_omap34xx() && prate == dss.cache_prate) ||
384 dss.cache_dss_cinfo.fck == fck)) {
385 DSSDBG("dispc clock info found from cache.\n");
386 *dss_cinfo = dss.cache_dss_cinfo;
387 *dispc_cinfo = dss.cache_dispc_cinfo;
/* FCK/PCK ratio constraint from Kconfig; dropped if it would exceed
 * the maximum DISPC functional clock. */
391 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
393 if (min_fck_per_pck &&
394 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
395 DSSERR("Requested pixel clock not possible with the current "
396 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
397 "the constraint off.\n");
402 memset(&best_dss, 0, sizeof(best_dss));
403 memset(&best_dispc, 0, sizeof(best_dispc));
/* OMAP2: fck is whatever it currently is; just pick DISPC dividers. */
405 if (cpu_is_omap24xx()) {
406 struct dispc_clock_info cur_dispc;
407 /* XXX can we change the clock on omap2? */
408 fck = dss_clk_get_rate(DSS_CLK_FCK1);
411 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
415 best_dss.fck_div = fck_div;
417 best_dispc = cur_dispc;
/* OMAP3: scan fck dividers from the maximum down; 3630's divider feeds
 * fck directly, other parts double the divider output. */
420 } else if (cpu_is_omap34xx()) {
421 for (fck_div = (cpu_is_omap3630() ? 32 : 16);
422 fck_div > 0; --fck_div) {
423 struct dispc_clock_info cur_dispc;
425 if (cpu_is_omap3630())
426 fck = prate / fck_div;
428 fck = prate / fck_div * 2;
430 if (fck > DISPC_MAX_FCK)
433 if (min_fck_per_pck &&
434 fck < req_pck * min_fck_per_pck)
439 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
/* Keep the candidate whose pck is closest to the request; an exact
 * match ends the scan early. */
441 if (abs(cur_dispc.pck - req_pck) <
442 abs(best_dispc.pck - req_pck)) {
445 best_dss.fck_div = fck_div;
447 best_dispc = cur_dispc;
449 if (cur_dispc.pck == req_pck)
/* No candidate found: retry once without the FCK/PCK constraint,
 * otherwise give up. */
459 if (min_fck_per_pck) {
460 DSSERR("Could not find suitable clock settings.\n"
461 "Turning FCK/PCK constraint off and"
467 DSSERR("Could not find suitable clock settings.\n");
473 *dss_cinfo = best_dss;
475 *dispc_cinfo = best_dispc;
/* Refresh the cache for the next request of this mode. */
477 dss.cache_req_pck = req_pck;
478 dss.cache_prate = prate;
479 dss.cache_dss_cinfo = best_dss;
480 dss.cache_dispc_cinfo = best_dispc;
/* IRQ handlers for the shared DSS interrupt line; which one is registered
 * depends on the SoC (see the request_irq() call in dss_init()).
 * NOTE(review): both bodies are truncated in this extract — the omap2
 * handler's body (dispatch to the DISPC handler and return) is missing
 * entirely, and the omap3 handler is missing its local irqstatus
 * declaration, the dispatch calls under each status bit, the DSI #endif
 * and the return statement. */
487 static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
494 static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
498 irqstatus = dss_read_reg(DSS_IRQSTATUS);
500 if (irqstatus & (1<<0)) /* DISPC_IRQ */
502 #ifdef CONFIG_OMAP2_DSS_DSI
503 if (irqstatus & (1<<1)) /* DSI_IRQ */
/* Poll DSS_SYSSTATUS bit 0 until the module reports reset done.
 * NOTE(review): truncated — the retry/timeout bookkeeping inside the loop,
 * the error return after the DSSERR, and the success return are missing
 * from this extract. */
510 static int _omap_dss_wait_reset(void)
514 while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
516 DSSERR("soft reset failed\n");
525 static int _omap_dss_reset(void)
528 REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
529 return _omap_dss_wait_reset();
532 void dss_set_venc_output(enum omap_dss_venc_type type)
536 if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
538 else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
543 /* venc out selection. 0 = comp, 1 = svideo */
544 REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
547 void dss_set_dac_pwrdn_bgz(bool enable)
549 REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
/* Probe-time initialisation of the DSS core: map the register space,
 * optionally quiesce/reset hardware a bootloader left running, program
 * autoidle and VENC control bits, install the SoC-appropriate IRQ handler,
 * grab dpll4_m4_ck on OMAP3, and print the IP revision.
 * NOTE(review): heavily truncated in this extract — the local declarations
 * (r, rev), the ioremap NULL check's error return, the skip_init guard and
 * the delay mentioned in the comment, the #endif for the VENC block, the
 * cpu check feeding the handler selection, error-path labels between the
 * request_irq/clk_get failures, the success return, and the trailing
 * iounmap/return of the failure path are all missing. */
552 int dss_init(bool skip_init)
557 dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
559 DSSERR("can't ioremap DSS\n");
565 /* disable LCD and DIGIT output. This seems to fix the synclost
566 * problem that we get, if the bootloader starts the DSS and
567 * the kernel resets it */
/* 0x48050440 is DISPC_CONTROL; clears the LCDENABLE/DIGITALENABLE bits. */
568 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
570 /* We need to wait here a bit, otherwise we sometimes start to
571 * get synclost errors, and after that only power cycle will
572 * restore DSS functionality. I have no idea why this happens.
573 * And we have to wait _before_ resetting the DSS, but after
/* autoidle */
582 REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
/* Select DPLL as DISPC clock source. */
585 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
587 #ifdef CONFIG_OMAP2_DSS_VENC
588 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
589 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
590 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
/* Shared DSS interrupt; the handler depends on the SoC generation. */
593 r = request_irq(INT_24XX_DSS_IRQ,
595 ? dss_irq_handler_omap2
596 : dss_irq_handler_omap3,
597 0, "OMAP DSS", NULL);
600 DSSERR("omap2 dss: request_irq failed\n");
604 if (cpu_is_omap34xx()) {
605 dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
606 if (IS_ERR(dss.dpll4_m4_ck)) {
607 DSSERR("Failed to get dpll4_m4_ck\n");
608 r = PTR_ERR(dss.dpll4_m4_ck);
/* Default both fclk sources to the always-on functional clock. */
613 dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
614 dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
618 rev = dss_read_reg(DSS_REVISION);
619 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
620 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
/* failure path: release the IRQ taken above */
625 free_irq(INT_24XX_DSS_IRQ, NULL);
634 if (cpu_is_omap34xx())
635 clk_put(dss.dpll4_m4_ck);
637 free_irq(INT_24XX_DSS_IRQ, NULL);