dmaengine: imx: fix the build failure on x86_64
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955..434fb61 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -42,6 +43,8 @@
 #include <mach/dma.h>
 #include <mach/hardware.h>
 
+#include "dmaengine.h"
+
 /* SDMA registers */
 #define SDMA_H_C0PTR           0x000
 #define SDMA_H_INTR            0x004
@@ -247,7 +250,7 @@ struct sdma_engine;
 struct sdma_channel {
        struct sdma_engine              *sdma;
        unsigned int                    channel;
-       enum dma_data_direction         direction;
+       enum dma_transfer_direction             direction;
        enum sdma_peripheral_type       peripheral_type;
        unsigned int                    event_id0;
        unsigned int                    event_id1;
@@ -260,17 +263,18 @@ struct sdma_channel {
        unsigned int                    pc_from_device, pc_to_device;
        unsigned long                   flags;
        dma_addr_t                      per_address;
-       u32                             event_mask0, event_mask1;
-       u32                             watermark_level;
+       unsigned long                   event_mask[2];
+       unsigned long                   watermark_level;
        u32                             shp_addr, per_addr;
        struct dma_chan                 chan;
        spinlock_t                      lock;
        struct dma_async_tx_descriptor  desc;
-       dma_cookie_t                    last_completed;
        enum dma_status                 status;
+       unsigned int                    chn_count;
+       unsigned int                    chn_real_count;
 };
 
-#define IMX_DMA_SG_LOOP                (1 << 0)
+#define IMX_DMA_SG_LOOP                BIT(0)
 
 #define MAX_DMA_CHANNELS 32
 #define MXC_SDMA_DEFAULT_PRIORITY 1
@@ -344,9 +348,9 @@ static const struct of_device_id sdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 
-#define SDMA_H_CONFIG_DSPDMA   (1 << 12) /* indicates if the DSPDMA is used */
-#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
-#define SDMA_H_CONFIG_ACR      (1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_DSPDMA   BIT(12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR      BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 #define SDMA_H_CONFIG_CSM      (3)       /* indicates which context switch mode is selected*/
 
 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
@@ -361,37 +365,42 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 {
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
-       u32 evt, mcu, dsp;
+       unsigned long evt, mcu, dsp;
 
        if (event_override && mcu_override && dsp_override)
                return -EINVAL;
 
-       evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
-       mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
-       dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+       evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
+       mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
+       dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 
        if (dsp_override)
-               dsp &= ~(1 << channel);
+               __clear_bit(channel, &dsp);
        else
-               dsp |= (1 << channel);
+               __set_bit(channel, &dsp);
 
        if (event_override)
-               evt &= ~(1 << channel);
+               __clear_bit(channel, &evt);
        else
-               evt |= (1 << channel);
+               __set_bit(channel, &evt);
 
        if (mcu_override)
-               mcu &= ~(1 << channel);
+               __clear_bit(channel, &mcu);
        else
-               mcu |= (1 << channel);
+               __set_bit(channel, &mcu);
 
-       __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
-       __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
-       __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+       writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
+       writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
+       writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 
        return 0;
 }
 
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+       writel(BIT(channel), sdma->regs + SDMA_H_START);
+}
+
 /*
  * sdma_run_channel - run a channel and wait till it's done
  */
@@ -403,7 +412,7 @@ static int sdma_run_channel(struct sdma_channel *sdmac)
 
        init_completion(&sdmac->done);
 
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+       sdma_enable_channel(sdma, channel);
 
        ret = wait_for_completion_timeout(&sdmac->done, HZ);
 
@@ -450,12 +459,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 {
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
-       u32 val;
+       unsigned long val;
        u32 chnenbl = chnenbl_ofs(sdma, event);
 
-       val = __raw_readl(sdma->regs + chnenbl);
-       val |= (1 << channel);
-       __raw_writel(val, sdma->regs + chnenbl);
+       val = readl_relaxed(sdma->regs + chnenbl);
+       __set_bit(channel, &val);
+       writel_relaxed(val, sdma->regs + chnenbl);
 }
 
 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@@ -463,11 +472,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
        u32 chnenbl = chnenbl_ofs(sdma, event);
-       u32 val;
+       unsigned long val;
 
-       val = __raw_readl(sdma->regs + chnenbl);
-       val &= ~(1 << channel);
-       __raw_writel(val, sdma->regs + chnenbl);
+       val = readl_relaxed(sdma->regs + chnenbl);
+       __clear_bit(channel, &val);
+       writel_relaxed(val, sdma->regs + chnenbl);
 }
 
 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
@@ -503,6 +512,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd;
        int i, error = 0;
 
+       sdmac->chn_real_count = 0;
        /*
         * non loop mode. Iterate over all descriptors, collect
         * errors and call callback function
@@ -512,6 +522,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 
                 if (bd->mode.status & (BD_DONE | BD_RROR))
                        error = -EIO;
+                sdmac->chn_real_count += bd->mode.count;
        }
 
        if (error)
@@ -519,9 +530,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        else
                sdmac->status = DMA_SUCCESS;
 
+       dma_cookie_complete(&sdmac->desc);
        if (sdmac->desc.callback)
                sdmac->desc.callback(sdmac->desc.callback_param);
-       sdmac->last_completed = sdmac->desc.cookie;
 }
 
 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -541,10 +552,10 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 {
        struct sdma_engine *sdma = dev_id;
-       u32 stat;
+       unsigned long stat;
 
-       stat = __raw_readl(sdma->regs + SDMA_H_INTR);
-       __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+       stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+       writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
        while (stat) {
                int channel = fls(stat) - 1;
@@ -552,7 +563,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 
                mxc_sdma_handle_channel(sdmac);
 
-               stat &= ~(1 << channel);
+               __clear_bit(channel, &stat);
        }
 
        return IRQ_HANDLED;
@@ -650,7 +661,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
        int ret;
 
-       if (sdmac->direction == DMA_FROM_DEVICE) {
+       if (sdmac->direction == DMA_DEV_TO_MEM) {
                load_address = sdmac->pc_from_device;
        } else {
                load_address = sdmac->pc_to_device;
@@ -660,11 +671,11 @@ static int sdma_load_context(struct sdma_channel *sdmac)
                return load_address;
 
        dev_dbg(sdma->dev, "load_address = %d\n", load_address);
-       dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+       dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
        dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
        dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
-       dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
-       dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+       dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
+       dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
        mutex_lock(&sdma->channel_0_lock);
 
@@ -674,8 +685,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        /* Send by context the event mask,base address for peripheral
         * and watermark level
         */
-       context->gReg[0] = sdmac->event_mask1;
-       context->gReg[1] = sdmac->event_mask0;
+       context->gReg[0] = sdmac->event_mask[1];
+       context->gReg[1] = sdmac->event_mask[0];
        context->gReg[2] = sdmac->per_addr;
        context->gReg[6] = sdmac->shp_addr;
        context->gReg[7] = sdmac->watermark_level;
@@ -698,7 +709,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac)
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
 
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+       writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
        sdmac->status = DMA_ERROR;
 }
 
@@ -708,13 +719,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
 
        sdma_disable_channel(sdmac);
 
-       sdmac->event_mask0 = 0;
-       sdmac->event_mask1 = 0;
+       sdmac->event_mask[0] = 0;
+       sdmac->event_mask[1] = 0;
        sdmac->shp_addr = 0;
        sdmac->per_addr = 0;
 
        if (sdmac->event_id0) {
-               if (sdmac->event_id0 > 32)
+               if (sdmac->event_id0 >= sdmac->sdma->num_events)
                        return -EINVAL;
                sdma_event_enable(sdmac, sdmac->event_id0);
        }
@@ -737,15 +748,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
                        (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
                /* Handle multiple event channels differently */
                if (sdmac->event_id1) {
-                       sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+                       sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
                        if (sdmac->event_id1 > 31)
-                               sdmac->watermark_level |= 1 << 31;
-                       sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+                               __set_bit(31, &sdmac->watermark_level);
+                       sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
                        if (sdmac->event_id0 > 31)
-                               sdmac->watermark_level |= 1 << 30;
+                               __set_bit(30, &sdmac->watermark_level);
                } else {
-                       sdmac->event_mask0 = 1 << sdmac->event_id0;
-                       sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+                       __set_bit(sdmac->event_id0, sdmac->event_mask);
                }
                /* Watermark Level */
                sdmac->watermark_level |= sdmac->watermark_level;
@@ -771,7 +781,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
                return -EINVAL;
        }
 
-       __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+       writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
 
        return 0;
 }
@@ -793,8 +803,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
        sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
        sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
-       clk_enable(sdma->clk);
-
        sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
 
        init_completion(&sdmac->done);
@@ -807,24 +815,6 @@ out:
        return ret;
 }
 
-static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
-{
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
-}
-
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
-{
-       dma_cookie_t cookie = sdmac->chan.cookie;
-
-       if (++cookie < 0)
-               cookie = 1;
-
-       sdmac->chan.cookie = cookie;
-       sdmac->desc.cookie = cookie;
-
-       return cookie;
-}
-
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
        return container_of(chan, struct sdma_channel, chan);
@@ -832,17 +822,15 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
+       unsigned long flags;
        struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-       struct sdma_engine *sdma = sdmac->sdma;
        dma_cookie_t cookie;
 
-       spin_lock_irq(&sdmac->lock);
-
-       cookie = sdma_assign_cookie(sdmac);
+       spin_lock_irqsave(&sdmac->lock, flags);
 
-       sdma_enable_channel(sdma, sdmac->channel);
+       cookie = dma_cookie_assign(tx);
 
-       spin_unlock_irq(&sdmac->lock);
+       spin_unlock_irqrestore(&sdmac->lock, flags);
 
        return cookie;
 }
@@ -871,11 +859,14 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 
        sdmac->peripheral_type = data->peripheral_type;
        sdmac->event_id0 = data->dma_request;
-       ret = sdma_set_channel_priority(sdmac, prio);
+
+       clk_enable(sdmac->sdma->clk);
+
+       ret = sdma_request_channel(sdmac);
        if (ret)
                return ret;
 
-       ret = sdma_request_channel(sdmac);
+       ret = sdma_set_channel_priority(sdmac, prio);
        if (ret)
                return ret;
 
@@ -911,8 +902,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
-               unsigned long flags)
+               unsigned int sg_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -941,6 +932,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                goto err_out;
        }
 
+       sdmac->chn_count = 0;
        for_each_sg(sgl, sg, sg_len, i) {
                struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
                int param;
@@ -957,6 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                }
 
                bd->mode.count = count;
+               sdmac->chn_count += count;
 
                if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        ret =  -EINVAL;
@@ -1008,7 +1001,8 @@ err_out:
 
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction,
+               void *context)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,15 +1087,18 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                sdma_disable_channel(sdmac);
                return 0;
        case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        sdmac->per_address = dmaengine_cfg->src_addr;
-                       sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+                       sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+                                               dmaengine_cfg->src_addr_width;
                        sdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        sdmac->per_address = dmaengine_cfg->dst_addr;
-                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+                                               dmaengine_cfg->dst_addr_width;
                        sdmac->word_size = dmaengine_cfg->dst_addr_width;
                }
+               sdmac->direction = dmaengine_cfg->direction;
                return sdma_config_channel(sdmac);
        default:
                return -ENOSYS;
@@ -1119,16 +1116,19 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
        last_used = chan->cookie;
 
-       dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+       dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+                       sdmac->chn_count - sdmac->chn_real_count);
 
        return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
 {
-       /*
-        * Nothing to do. We only have a single descriptor
-        */
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+
+       if (sdmac->status == DMA_IN_PROGRESS)
+               sdma_enable_channel(sdma, sdmac->channel);
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
@@ -1220,7 +1220,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
        clk_enable(sdma->clk);
 
        /* Be sure SDMA has not started yet */
-       __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+       writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
 
        sdma->channel_control = dma_alloc_coherent(NULL,
                        MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
@@ -1243,11 +1243,11 @@ static int __init sdma_init(struct sdma_engine *sdma)
 
        /* disable all channels */
        for (i = 0; i < sdma->num_events; i++)
-               __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+               writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
 
        /* All channels have priority 0 */
        for (i = 0; i < MAX_DMA_CHANNELS; i++)
-               __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+               writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
 
        ret = sdma_request_channel(&sdma->channel[0]);
        if (ret)
@@ -1256,16 +1256,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
        sdma_config_ownership(&sdma->channel[0], false, true, false);
 
        /* Set Command Channel (Channel Zero) */
-       __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+       writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
 
        /* Set bits of CONFIG register but with static context switching */
        /* FIXME: Check whether to set ACR bit depending on clock ratios */
-       __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+       writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
 
-       __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+       writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
        /* Set bits of CONFIG register with given context switching mode */
-       __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+       writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
 
        /* Initializes channel's priorities */
        sdma_set_channel_priority(&sdma->channel[0], 7);
@@ -1357,6 +1357,7 @@ static int __init sdma_probe(struct platform_device *pdev)
                spin_lock_init(&sdmac->lock);
 
                sdmac->chan.device = &sdma->dma_device;
+               dma_cookie_init(&sdmac->chan);
                sdmac->channel = i;
 
                /*
@@ -1377,7 +1378,9 @@ static int __init sdma_probe(struct platform_device *pdev)
                sdma_add_scripts(sdma, pdata->script_addrs);
 
        if (pdata) {
-               sdma_get_firmware(sdma, pdata->fw_name);
+               ret = sdma_get_firmware(sdma, pdata->fw_name);
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
        } else {
                /*
                 * Because that device tree does not encode ROM script address,
@@ -1386,15 +1389,12 @@ static int __init sdma_probe(struct platform_device *pdev)
                 */
                ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
                                              &fw_name);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to get firmware name\n");
-                       goto err_init;
-               }
-
-               ret = sdma_get_firmware(sdma, fw_name);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to get firmware\n");
-                       goto err_init;
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware name\n");
+               else {
+                       ret = sdma_get_firmware(sdma, fw_name);
+                       if (ret)
+                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
                }
        }