Merge branch 'rmobile-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal...
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index fe3578b..0a7f1a4 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -185,13 +185,15 @@ struct dsi_reg { u16 idx; };
 #define DSI_DT_RX_SHORT_READ_1         0x21
 #define DSI_DT_RX_SHORT_READ_2         0x22
 
-#define FINT_MAX 2100000
-#define FINT_MIN 750000
-#define REGN_MAX (1 << 7)
-#define REGM_MAX ((1 << 11) - 1)
-#define REGM_DISPC_MAX (1 << 4)
-#define REGM_DSI_MAX (1 << 4)
-#define LP_DIV_MAX ((1 << 13) - 1)
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+#define DSI_MAX_NR_ISRS                2
+
+struct dsi_isr_data {
+       omap_dsi_isr_t  isr;
+       void            *arg;
+       u32             mask;
+};
 
 enum fifo_size {
        DSI_FIFO_SIZE_0         = 0,
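
The dedicated BTA completion/callback pair is replaced by a small table-driven
ISR framework: callers register a handler of type omap_dsi_isr_t together with
a mask of the status bits they care about. As a rough illustration (the handler
name below is hypothetical, not part of the patch, and simply mirrors the
dsi_completion_handler() added further down), a conforming handler looks like:

/*
 * Hypothetical example of an omap_dsi_isr_t handler. 'arg' is the
 * pointer given at registration time; 'mask' carries the raw status
 * bits that triggered the call.
 */
static void example_bta_isr(void *arg, u32 mask)
{
	struct completion *comp = arg;

	if (mask & DSI_VC_IRQ_BTA)	/* only react to the bit we registered for */
		complete(comp);
}
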
@@ -219,6 +221,12 @@ struct dsi_irq_stats {
        unsigned cio_irqs[32];
 };
 
+struct dsi_isr_tables {
+       struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+       struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+       struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
 static struct
 {
        struct platform_device *pdev;
@@ -241,8 +249,10 @@ static struct
 
        unsigned pll_locked;
 
-       struct completion bta_completion;
-       void (*bta_callback)(void);
+       spinlock_t irq_lock;
+       struct dsi_isr_tables isr_tables;
+       /* space for a copy used by the interrupt handler */
+       struct dsi_isr_tables isr_tables_copy;
 
        int update_channel;
        struct dsi_update_region update_region;
@@ -277,6 +287,11 @@ static struct
        spinlock_t irq_stats_lock;
        struct dsi_irq_stats irq_stats;
 #endif
+       /* DSI PLL Parameter Ranges */
+       unsigned long regm_max, regn_max;
+       unsigned long regm_dispc_max, regm_dsi_max;
+       unsigned long fint_min, fint_max;
+       unsigned long lpdiv_max;
 } dsi;
 
 #ifdef DEBUG
@@ -320,6 +335,11 @@ static bool dsi_bus_is_locked(void)
        return dsi.bus_lock.count == 0;
 }
 
+static void dsi_completion_handler(void *data, u32 mask)
+{
+       complete((struct completion *)data);
+}
+
 static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
                int value)
 {
@@ -389,6 +409,9 @@ static void dsi_perf_show(const char *name)
 
 static void print_irq_status(u32 status)
 {
+       if (status == 0)
+               return;
+
 #ifndef VERBOSE_IRQ
        if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
                return;
@@ -424,6 +447,9 @@ static void print_irq_status(u32 status)
 
 static void print_irq_status_vc(int channel, u32 status)
 {
+       if (status == 0)
+               return;
+
 #ifndef VERBOSE_IRQ
        if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
                return;
@@ -450,6 +476,9 @@ static void print_irq_status_vc(int channel, u32 status)
 
 static void print_irq_status_cio(u32 status)
 {
+       if (status == 0)
+               return;
+
        printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
 
 #define PIS(x) \
@@ -480,26 +509,33 @@ static void print_irq_status_cio(u32 status)
        printk("\n");
 }
 
-static int debug_irq;
-
-/* called from dss */
-static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
 {
-       u32 irqstatus, vcstatus, ciostatus;
        int i;
 
-       irqstatus = dsi_read_reg(DSI_IRQSTATUS);
-
-       /* IRQ is not for us */
-       if (!irqstatus)
-               return IRQ_NONE;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
        spin_lock(&dsi.irq_stats_lock);
+
        dsi.irq_stats.irq_count++;
        dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
+
+       for (i = 0; i < 4; ++i)
+               dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]);
+
+       dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
+
+       spin_unlock(&dsi.irq_stats_lock);
+}
+#else
+#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus)
 #endif
 
+static int debug_irq;
+
+static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+       int i;
+
        if (irqstatus & DSI_IRQ_ERROR_MASK) {
                DSSERR("DSI error, irqstatus %x\n", irqstatus);
                print_irq_status(irqstatus);
@@ -510,37 +546,88 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
                print_irq_status(irqstatus);
        }
 
-#ifdef DSI_CATCH_MISSING_TE
-       if (irqstatus & DSI_IRQ_TE_TRIGGER)
-               del_timer(&dsi.te_timer);
-#endif
+       for (i = 0; i < 4; ++i) {
+               if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
+                       DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+                                      i, vcstatus[i]);
+                       print_irq_status_vc(i, vcstatus[i]);
+               } else if (debug_irq) {
+                       print_irq_status_vc(i, vcstatus[i]);
+               }
+       }
+
+       if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
+               DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+               print_irq_status_cio(ciostatus);
+       } else if (debug_irq) {
+               print_irq_status_cio(ciostatus);
+       }
+}
+
+static void dsi_call_isrs(struct dsi_isr_data *isr_array,
+               unsigned isr_array_size, u32 irqstatus)
+{
+       struct dsi_isr_data *isr_data;
+       int i;
+
+       for (i = 0; i < isr_array_size; i++) {
+               isr_data = &isr_array[i];
+               if (isr_data->isr && isr_data->mask & irqstatus)
+                       isr_data->isr(isr_data->arg, irqstatus);
+       }
+}
+
+static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
+               u32 irqstatus, u32 *vcstatus, u32 ciostatus)
+{
+       int i;
+
+       dsi_call_isrs(isr_tables->isr_table,
+                       ARRAY_SIZE(isr_tables->isr_table),
+                       irqstatus);
 
        for (i = 0; i < 4; ++i) {
-               if ((irqstatus & (1<<i)) == 0)
+               if (vcstatus[i] == 0)
                        continue;
+               dsi_call_isrs(isr_tables->isr_table_vc[i],
+                               ARRAY_SIZE(isr_tables->isr_table_vc[i]),
+                               vcstatus[i]);
+       }
 
-               vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+       if (ciostatus != 0)
+               dsi_call_isrs(isr_tables->isr_table_cio,
+                               ARRAY_SIZE(isr_tables->isr_table_cio),
+                               ciostatus);
+}
 
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-               dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
-#endif
+static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
+{
+       u32 irqstatus, vcstatus[4], ciostatus;
+       int i;
 
-               if (vcstatus & DSI_VC_IRQ_BTA) {
-                       complete(&dsi.bta_completion);
+       spin_lock(&dsi.irq_lock);
 
-                       if (dsi.bta_callback)
-                               dsi.bta_callback();
-               }
+       irqstatus = dsi_read_reg(DSI_IRQSTATUS);
 
-               if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
-                       DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
-                                      i, vcstatus);
-                       print_irq_status_vc(i, vcstatus);
-               } else if (debug_irq) {
-                       print_irq_status_vc(i, vcstatus);
+       /* IRQ is not for us */
+       if (!irqstatus) {
+               spin_unlock(&dsi.irq_lock);
+               return IRQ_NONE;
+       }
+
+       dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+       /* flush posted write */
+       dsi_read_reg(DSI_IRQSTATUS);
+
+       for (i = 0; i < 4; ++i) {
+               if ((irqstatus & (1 << i)) == 0) {
+                       vcstatus[i] = 0;
+                       continue;
                }
 
-               dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+               vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+
+               dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]);
                /* flush posted write */
                dsi_read_reg(DSI_VC_IRQSTATUS(i));
        }
@@ -548,99 +635,289 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
        if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
                ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
 
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-               dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
-#endif
-
                dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
                /* flush posted write */
                dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
-
-               if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
-                       DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
-                       print_irq_status_cio(ciostatus);
-               } else if (debug_irq) {
-                       print_irq_status_cio(ciostatus);
-               }
+       } else {
+               ciostatus = 0;
        }
 
-       dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
-       /* flush posted write */
-       dsi_read_reg(DSI_IRQSTATUS);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-       spin_unlock(&dsi.irq_stats_lock);
+#ifdef DSI_CATCH_MISSING_TE
+       if (irqstatus & DSI_IRQ_TE_TRIGGER)
+               del_timer(&dsi.te_timer);
 #endif
+
+       /* make a copy and unlock, so that isrs can unregister
+        * themselves */
+       memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables));
+
+       spin_unlock(&dsi.irq_lock);
+
+       dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus);
+
+       dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus);
+
+       dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus);
+
        return IRQ_HANDLED;
 }
 
-static void _dsi_initialize_irq(void)
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
+               unsigned isr_array_size, u32 default_mask,
+               const struct dsi_reg enable_reg,
+               const struct dsi_reg status_reg)
 {
-       u32 l;
+       struct dsi_isr_data *isr_data;
+       u32 mask;
+       u32 old_mask;
        int i;
 
-       /* disable all interrupts */
-       dsi_write_reg(DSI_IRQENABLE, 0);
-       for (i = 0; i < 4; ++i)
-               dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
-       dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+       mask = default_mask;
 
-       /* clear interrupt status */
-       l = dsi_read_reg(DSI_IRQSTATUS);
-       dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+       for (i = 0; i < isr_array_size; i++) {
+               isr_data = &isr_array[i];
 
-       for (i = 0; i < 4; ++i) {
-               l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
-               dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+               if (isr_data->isr == NULL)
+                       continue;
+
+               mask |= isr_data->mask;
        }
 
-       l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
-       dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+       old_mask = dsi_read_reg(enable_reg);
+       /* clear the irqstatus for newly enabled irqs */
+       dsi_write_reg(status_reg, (mask ^ old_mask) & mask);
+       dsi_write_reg(enable_reg, mask);
+
+       /* flush posted writes */
+       dsi_read_reg(enable_reg);
+       dsi_read_reg(status_reg);
+}
 
-       /* enable error irqs */
-       l = DSI_IRQ_ERROR_MASK;
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs(void)
+{
+       u32 mask = DSI_IRQ_ERROR_MASK;
 #ifdef DSI_CATCH_MISSING_TE
-       l |= DSI_IRQ_TE_TRIGGER;
+       mask |= DSI_IRQ_TE_TRIGGER;
 #endif
-       dsi_write_reg(DSI_IRQENABLE, l);
+       _omap_dsi_configure_irqs(dsi.isr_tables.isr_table,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table), mask,
+                       DSI_IRQENABLE, DSI_IRQSTATUS);
+}
 
-       l = DSI_VC_IRQ_ERROR_MASK;
-       for (i = 0; i < 4; ++i)
-               dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_vc(int vc)
+{
+       _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc],
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]),
+                       DSI_VC_IRQ_ERROR_MASK,
+                       DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
+}
 
-       l = DSI_CIO_IRQ_ERROR_MASK;
-       dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, l);
+/* dsi.irq_lock has to be locked by the caller */
+static void _omap_dsi_set_irqs_cio(void)
+{
+       _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_cio),
+                       DSI_CIO_IRQ_ERROR_MASK,
+                       DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
 }
 
-static u32 dsi_get_errors(void)
+static void _dsi_initialize_irq(void)
 {
        unsigned long flags;
-       u32 e;
-       spin_lock_irqsave(&dsi.errors_lock, flags);
-       e = dsi.errors;
-       dsi.errors = 0;
-       spin_unlock_irqrestore(&dsi.errors_lock, flags);
-       return e;
+       int vc;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables));
+
+       _omap_dsi_set_irqs();
+       for (vc = 0; vc < 4; ++vc)
+               _omap_dsi_set_irqs_vc(vc);
+       _omap_dsi_set_irqs_cio();
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
 }
 
-static void dsi_vc_enable_bta_irq(int channel)
+static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+               struct dsi_isr_data *isr_array, unsigned isr_array_size)
 {
-       u32 l;
+       struct dsi_isr_data *isr_data;
+       int free_idx;
+       int i;
+
+       BUG_ON(isr == NULL);
 
-       dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+       /* check for duplicate entry and find a free slot */
+       free_idx = -1;
+       for (i = 0; i < isr_array_size; i++) {
+               isr_data = &isr_array[i];
+
+               if (isr_data->isr == isr && isr_data->arg == arg &&
+                               isr_data->mask == mask) {
+                       return -EINVAL;
+               }
+
+               if (isr_data->isr == NULL && free_idx == -1)
+                       free_idx = i;
+       }
 
-       l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
-       l |= DSI_VC_IRQ_BTA;
-       dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+       if (free_idx == -1)
+               return -EBUSY;
+
+       isr_data = &isr_array[free_idx];
+       isr_data->isr = isr;
+       isr_data->arg = arg;
+       isr_data->mask = mask;
+
+       return 0;
 }
 
-static void dsi_vc_disable_bta_irq(int channel)
+static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
+               struct dsi_isr_data *isr_array, unsigned isr_array_size)
 {
-       u32 l;
+       struct dsi_isr_data *isr_data;
+       int i;
+
+       for (i = 0; i < isr_array_size; i++) {
+               isr_data = &isr_array[i];
+               if (isr_data->isr != isr || isr_data->arg != arg ||
+                               isr_data->mask != mask)
+                       continue;
+
+               isr_data->isr = NULL;
+               isr_data->arg = NULL;
+               isr_data->mask = 0;
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table));
+
+       if (r == 0)
+               _omap_dsi_set_irqs();
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
+
+static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table));
+
+       if (r == 0)
+               _omap_dsi_set_irqs();
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
+
+static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+               u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_register_isr(isr, arg, mask,
+                       dsi.isr_tables.isr_table_vc[channel],
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+       if (r == 0)
+               _omap_dsi_set_irqs_vc(channel);
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
+
+static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg,
+               u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_unregister_isr(isr, arg, mask,
+                       dsi.isr_tables.isr_table_vc[channel],
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel]));
+
+       if (r == 0)
+               _omap_dsi_set_irqs_vc(channel);
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
+
+static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+       if (r == 0)
+               _omap_dsi_set_irqs_cio();
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
 
-       l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
-       l &= ~DSI_VC_IRQ_BTA;
-       dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask)
+{
+       unsigned long flags;
+       int r;
+
+       spin_lock_irqsave(&dsi.irq_lock, flags);
+
+       r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio,
+                       ARRAY_SIZE(dsi.isr_tables.isr_table_cio));
+
+       if (r == 0)
+               _omap_dsi_set_irqs_cio();
+
+       spin_unlock_irqrestore(&dsi.irq_lock, flags);
+
+       return r;
+}
+
+static u32 dsi_get_errors(void)
+{
+       unsigned long flags;
+       u32 e;
+       spin_lock_irqsave(&dsi.errors_lock, flags);
+       e = dsi.errors;
+       dsi.errors = 0;
+       spin_unlock_irqrestore(&dsi.errors_lock, flags);
+       return e;
 }
 
 /* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
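
One subtle line in _omap_dsi_configure_irqs() above is the status clear before
enabling: (mask ^ old_mask) & mask selects exactly the bits that are being
newly enabled, so their stale status is cleared without touching interrupts
that were already enabled. A minimal sketch of the bit arithmetic (the helper
name is illustrative only, not part of the patch):

/*
 * Bits set in 'mask' that were not set in 'old_mask', i.e. the
 * interrupts being newly enabled. (mask ^ old_mask) & mask is
 * equivalent to mask & ~old_mask.
 *
 *   old_mask = 0x06, mask = 0x0e  ->  returns 0x08
 */
static inline u32 dsi_newly_enabled_bits(u32 old_mask, u32 mask)
{
	return (mask ^ old_mask) & mask;
}
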
@@ -751,7 +1028,7 @@ static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
 
        lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
 
-       if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
+       if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max)
                return -EINVAL;
 
        dsi_fclk = dsi_fclk_rate();
@@ -801,16 +1078,16 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
 static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
                struct dsi_clock_info *cinfo)
 {
-       if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
+       if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max)
                return -EINVAL;
 
-       if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
+       if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max)
                return -EINVAL;
 
-       if (cinfo->regm_dispc > REGM_DISPC_MAX)
+       if (cinfo->regm_dispc > dsi.regm_dispc_max)
                return -EINVAL;
 
-       if (cinfo->regm_dsi > REGM_DSI_MAX)
+       if (cinfo->regm_dsi > dsi.regm_dsi_max)
                return -EINVAL;
 
        if (cinfo->use_sys_clk) {
@@ -829,7 +1106,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
 
        cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
 
-       if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
+       if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min)
                return -EINVAL;
 
        cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
@@ -864,7 +1141,7 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
 
        dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
 
-       max_dss_fck = dss_feat_get_max_dss_fck();
+       max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
 
        if (req_pck == dsi.cache_req_pck &&
                        dsi.cache_cinfo.clkin == dss_sys_clk) {
@@ -899,17 +1176,17 @@ retry:
        /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
        /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
        /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
-       for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
+       for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) {
                if (cur.highfreq == 0)
                        cur.fint = cur.clkin / cur.regn;
                else
                        cur.fint = cur.clkin / (2 * cur.regn);
 
-               if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
+               if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min)
                        continue;
 
                /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
-               for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
+               for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) {
                        unsigned long a, b;
 
                        a = 2 * cur.regm * (cur.clkin/1000);
@@ -921,7 +1198,7 @@ retry:
 
                        /* dsi_pll_hsdiv_dispc_clk(MHz) =
                         * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
-                       for (cur.regm_dispc = 1; cur.regm_dispc < REGM_DISPC_MAX;
+                       for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max;
                                        ++cur.regm_dispc) {
                                struct dispc_clock_info cur_dispc;
                                cur.dsi_pll_hsdiv_dispc_clk =
@@ -994,6 +1271,8 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
        int r = 0;
        u32 l;
        int f;
+       u8 regn_start, regn_end, regm_start, regm_end;
+       u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
 
        DSSDBGF();
 
@@ -1038,19 +1317,30 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
                dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
                cinfo->dsi_pll_hsdiv_dsi_clk);
 
+       dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
+       dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
+       dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
+                       &regm_dispc_end);
+       dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
+                       &regm_dsi_end);
+
        REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
 
        l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
        l = FLD_MOD(l, 1, 0, 0);                /* DSI_PLL_STOPMODE */
-       l = FLD_MOD(l, cinfo->regn - 1, 7, 1);  /* DSI_PLL_REGN */
-       l = FLD_MOD(l, cinfo->regm, 18, 8);     /* DSI_PLL_REGM */
+       /* DSI_PLL_REGN */
+       l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
+       /* DSI_PLL_REGM */
+       l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
+       /* DSI_CLOCK_DIV */
        l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
-                       22, 19);                /* DSI_CLOCK_DIV */
+                       regm_dispc_start, regm_dispc_end);
+       /* DSIPROTO_CLOCK_DIV */
        l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
-                       26, 23);                /* DSIPROTO_CLOCK_DIV */
+                       regm_dsi_start, regm_dsi_end);
        dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
 
-       BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
+       BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max);
        if (cinfo->fint < 1000000)
                f = 0x3;
        else if (cinfo->fint < 1250000)
@@ -1119,6 +1409,26 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
 
        DSSDBG("PLL init\n");
 
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+       /*
+        * HACK: this is just a quick hack to get the USE_DSI_PLL
+        * option working. USE_DSI_PLL is itself a big hack, and
+        * should be removed.
+        */
+       if (dsi.vdds_dsi_reg == NULL) {
+               struct regulator *vdds_dsi;
+
+               vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi");
+
+               if (IS_ERR(vdds_dsi)) {
+                       DSSERR("can't get VDDS_DSI regulator\n");
+                       return PTR_ERR(vdds_dsi);
+               }
+
+               dsi.vdds_dsi_reg = vdds_dsi;
+       }
+#endif
+
        enable_clocks(1);
        dsi_enable_pll_clock(1);
 
@@ -1932,33 +2242,44 @@ static int dsi_vc_send_bta(int channel)
 
 int dsi_vc_send_bta_sync(int channel)
 {
+       DECLARE_COMPLETION_ONSTACK(completion);
        int r = 0;
        u32 err;
 
-       INIT_COMPLETION(dsi.bta_completion);
+       r = dsi_register_isr_vc(channel, dsi_completion_handler,
+                       &completion, DSI_VC_IRQ_BTA);
+       if (r)
+               goto err0;
 
-       dsi_vc_enable_bta_irq(channel);
+       r = dsi_register_isr(dsi_completion_handler, &completion,
+                       DSI_IRQ_ERROR_MASK);
+       if (r)
+               goto err1;
 
        r = dsi_vc_send_bta(channel);
        if (r)
-               goto err;
+               goto err2;
 
-       if (wait_for_completion_timeout(&dsi.bta_completion,
+       if (wait_for_completion_timeout(&completion,
                                msecs_to_jiffies(500)) == 0) {
                DSSERR("Failed to receive BTA\n");
                r = -EIO;
-               goto err;
+               goto err2;
        }
 
        err = dsi_get_errors();
        if (err) {
                DSSERR("Error while sending BTA: %x\n", err);
                r = -EIO;
-               goto err;
+               goto err2;
        }
-err:
-       dsi_vc_disable_bta_irq(channel);
-
+err2:
+       dsi_unregister_isr(dsi_completion_handler, &completion,
+                       DSI_IRQ_ERROR_MASK);
+err1:
+       dsi_unregister_isr_vc(channel, dsi_completion_handler,
+                       &completion, DSI_VC_IRQ_BTA);
+err0:
        return r;
 }
 EXPORT_SYMBOL(dsi_vc_send_bta_sync);
@@ -2772,19 +3093,20 @@ static void dsi_te_timeout(unsigned long arg)
 }
 #endif
 
+static void dsi_framedone_bta_callback(void *data, u32 mask);
+
 static void dsi_handle_framedone(int error)
 {
        const int channel = dsi.update_channel;
 
-       cancel_delayed_work(&dsi.framedone_timeout_work);
+       dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+                       NULL, DSI_VC_IRQ_BTA);
 
-       dsi_vc_disable_bta_irq(channel);
+       cancel_delayed_work(&dsi.framedone_timeout_work);
 
        /* SIDLEMODE back to smart-idle */
        dispc_enable_sidle();
 
-       dsi.bta_callback = NULL;
-
        if (dsi.te_enabled) {
                /* enable LP_RX_TO again after the TE */
                REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
@@ -2818,7 +3140,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
        dsi_handle_framedone(-ETIMEDOUT);
 }
 
-static void dsi_framedone_bta_callback(void)
+static void dsi_framedone_bta_callback(void *data, u32 mask)
 {
        dsi_handle_framedone(0);
 
@@ -2858,15 +3180,19 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
         * asynchronously.
         * */
 
-       dsi.bta_callback = dsi_framedone_bta_callback;
-
-       barrier();
-
-       dsi_vc_enable_bta_irq(channel);
+       r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback,
+                       NULL, DSI_VC_IRQ_BTA);
+       if (r) {
+               DSSERR("Failed to register BTA ISR\n");
+               dsi_handle_framedone(-EIO);
+               return;
+       }
 
        r = dsi_vc_send_bta(channel);
        if (r) {
                DSSERR("BTA after framedone failed\n");
+               dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
+                               NULL, DSI_VC_IRQ_BTA);
                dsi_handle_framedone(-EIO);
        }
 }
@@ -3230,9 +3556,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
        dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
                OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
 
-       dsi.vc[0].dssdev = dssdev;
-       dsi.vc[1].dssdev = dssdev;
-
        if (dsi.vdds_dsi_reg == NULL) {
                struct regulator *vdds_dsi;
 
@@ -3316,12 +3639,24 @@ void dsi_wait_pll_hsdiv_dsi_active(void)
                        dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
 }
 
+static void dsi_calc_clock_param_ranges(void)
+{
+       dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
+       dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
+       dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
+       dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
+       dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
+       dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
+       dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
+}
+
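
The compile-time limits removed in the first hunk (FINT_MIN/FINT_MAX, REGN_MAX,
REGM_MAX, REGM_DISPC_MAX, REGM_DSI_MAX, LP_DIV_MAX) are now read once from the
dss_features layer, so the same driver can carry per-SoC PLL limits. A hedged
sketch of the resulting check, equivalent to the regn test in
dsi_calc_clock_rates() above (the helper name is illustrative only):

/*
 * Illustrative only: once dsi_calc_clock_param_ranges() has filled in
 * dsi.regn_max at init time, the old "regn > REGN_MAX" macro test
 * becomes a comparison against the per-SoC value.
 */
static inline bool dsi_regn_in_range(unsigned regn)
{
	return regn != 0 && regn <= dsi.regn_max;
}
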
 static int dsi_init(struct platform_device *pdev)
 {
        u32 rev;
        int r, i;
        struct resource *dsi_mem;
 
+       spin_lock_init(&dsi.irq_lock);
        spin_lock_init(&dsi.errors_lock);
        dsi.errors = 0;
 
@@ -3330,8 +3665,6 @@ static int dsi_init(struct platform_device *pdev)
        dsi.irq_stats.last_reset = jiffies;
 #endif
 
-       init_completion(&dsi.bta_completion);
-
        mutex_init(&dsi.lock);
        sema_init(&dsi.bus_lock, 1);
 
@@ -3380,6 +3713,8 @@ static int dsi_init(struct platform_device *pdev)
                dsi.vc[i].vc_id = 0;
        }
 
+       dsi_calc_clock_param_ranges();
+
        enable_clocks(1);
 
        rev = dsi_read_reg(DSI_REVISION);