Merge branch 'spi/merge' into spi/next
author		Grant Likely <grant.likely@secretlab.ca>
Fri, 17 Jun 2011 14:32:26 +0000 (08:32 -0600)
committer	Grant Likely <grant.likely@secretlab.ca>
Fri, 17 Jun 2011 14:32:26 +0000 (08:32 -0600)
71 files changed:
Documentation/devicetree/bindings/spi/spi_nvidia.txt [new file with mode: 0644]
Documentation/spi/ep93xx_spi
arch/arm/mach-ep93xx/Makefile
arch/arm/mach-ep93xx/core.c
arch/arm/mach-ep93xx/dma-m2p.c [deleted file]
arch/arm/mach-ep93xx/dma.c [new file with mode: 0644]
arch/arm/mach-ep93xx/include/mach/dma.h
arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/ep93xx_dma.c [new file with mode: 0644]
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/atmel_spi.h [deleted file]
drivers/spi/spi-altera.c [moved from drivers/spi/spi_altera.c with 100% similarity]
drivers/spi/spi-ath79.c [moved from drivers/spi/ath79_spi.c with 99% similarity]
drivers/spi/spi-atmel.c [moved from drivers/spi/atmel_spi.c with 85% similarity]
drivers/spi/spi-au1550.c [moved from drivers/spi/au1550_spi.c with 99% similarity]
drivers/spi/spi-bfin-sport.c [moved from drivers/spi/spi_bfin_sport.c with 100% similarity]
drivers/spi/spi-bfin5xx.c [moved from drivers/spi/spi_bfin5xx.c with 100% similarity]
drivers/spi/spi-bitbang-txrx.h [moved from drivers/spi/spi_bitbang_txrx.h with 100% similarity]
drivers/spi/spi-bitbang.c [moved from drivers/spi/spi_bitbang.c with 98% similarity]
drivers/spi/spi-butterfly.c [moved from drivers/spi/spi_butterfly.c with 99% similarity]
drivers/spi/spi-coldfire-qspi.c [moved from drivers/spi/coldfire_qspi.c with 100% similarity]
drivers/spi/spi-davinci.c [moved from drivers/spi/davinci_spi.c with 100% similarity]
drivers/spi/spi-dw-mid.c [moved from drivers/spi/dw_spi_mid.c with 98% similarity]
drivers/spi/spi-dw-mmio.c [moved from drivers/spi/dw_spi_mmio.c with 97% similarity]
drivers/spi/spi-dw-pci.c [moved from drivers/spi/dw_spi_pci.c with 98% similarity]
drivers/spi/spi-dw.c [moved from drivers/spi/dw_spi.c with 99% similarity]
drivers/spi/spi-dw.h [moved from drivers/spi/dw_spi.h with 100% similarity]
drivers/spi/spi-ep93xx.c [moved from drivers/spi/ep93xx_spi.c with 77% similarity]
drivers/spi/spi-fsl-espi.c [moved from drivers/spi/spi_fsl_espi.c with 99% similarity]
drivers/spi/spi-fsl-lib.c [moved from drivers/spi/spi_fsl_lib.c with 99% similarity]
drivers/spi/spi-fsl-lib.h [moved from drivers/spi/spi_fsl_lib.h with 100% similarity]
drivers/spi/spi-fsl-spi.c [moved from drivers/spi/spi_fsl_spi.c with 99% similarity]
drivers/spi/spi-gpio.c [moved from drivers/spi/spi_gpio.c with 99% similarity]
drivers/spi/spi-imx.c [moved from drivers/spi/spi_imx.c with 100% similarity]
drivers/spi/spi-lm70llp.c [moved from drivers/spi/spi_lm70llp.c with 98% similarity]
drivers/spi/spi-mpc512x-psc.c [moved from drivers/spi/mpc512x_psc_spi.c with 100% similarity]
drivers/spi/spi-mpc52xx-psc.c [moved from drivers/spi/mpc52xx_psc_spi.c with 100% similarity]
drivers/spi/spi-mpc52xx.c [moved from drivers/spi/mpc52xx_spi.c with 100% similarity]
drivers/spi/spi-nuc900.c [moved from drivers/spi/spi_nuc900.c with 99% similarity]
drivers/spi/spi-oc-tiny.c [moved from drivers/spi/spi_oc_tiny.c with 100% similarity]
drivers/spi/spi-omap-100k.c [moved from drivers/spi/omap_spi_100k.c with 100% similarity]
drivers/spi/spi-omap-uwire.c [moved from drivers/spi/omap_uwire.c with 99% similarity]
drivers/spi/spi-omap2-mcspi.c [moved from drivers/spi/omap2_mcspi.c with 99% similarity]
drivers/spi/spi-orion.c [moved from drivers/spi/orion_spi.c with 98% similarity]
drivers/spi/spi-pl022.c [moved from drivers/spi/amba-pl022.c with 96% similarity]
drivers/spi/spi-ppc4xx.c [moved from drivers/spi/spi_ppc4xx.c with 99% similarity]
drivers/spi/spi-pxa2xx-pci.c [moved from drivers/spi/pxa2xx_spi_pci.c with 100% similarity]
drivers/spi/spi-pxa2xx.c [moved from drivers/spi/pxa2xx_spi.c with 100% similarity]
drivers/spi/spi-s3c24xx-fiq.S [moved from drivers/spi/spi_s3c24xx_fiq.S with 99% similarity]
drivers/spi/spi-s3c24xx-fiq.h [moved from drivers/spi/spi_s3c24xx_fiq.h with 100% similarity]
drivers/spi/spi-s3c24xx-gpio.c [moved from drivers/spi/spi_s3c24xx_gpio.c with 98% similarity]
drivers/spi/spi-s3c24xx.c [moved from drivers/spi/spi_s3c24xx.c with 99% similarity]
drivers/spi/spi-s3c64xx.c [moved from drivers/spi/spi_s3c64xx.c with 99% similarity]
drivers/spi/spi-sh-msiof.c [moved from drivers/spi/spi_sh_msiof.c with 100% similarity]
drivers/spi/spi-sh-sci.c [moved from drivers/spi/spi_sh_sci.c with 99% similarity]
drivers/spi/spi-sh.c [moved from drivers/spi/spi_sh.c with 100% similarity]
drivers/spi/spi-stmp.c [moved from drivers/spi/spi_stmp.c with 100% similarity]
drivers/spi/spi-tegra.c [moved from drivers/spi/spi_tegra.c with 96% similarity]
drivers/spi/spi-ti-ssp.c [moved from drivers/spi/ti-ssp-spi.c with 100% similarity]
drivers/spi/spi-tle62x0.c [moved from drivers/spi/tle62x0.c with 99% similarity]
drivers/spi/spi-topcliff-pch.c [moved from drivers/spi/spi_topcliff_pch.c with 53% similarity]
drivers/spi/spi-txx9.c [moved from drivers/spi/spi_txx9.c with 99% similarity]
drivers/spi/spi-xilinx.c [moved from drivers/spi/xilinx_spi.c with 100% similarity]
drivers/spi/spi.c
drivers/spi/spidev.c
sound/soc/ep93xx/ep93xx-ac97.c
sound/soc/ep93xx/ep93xx-i2s.c
sound/soc/ep93xx/ep93xx-pcm.c

diff --git a/Documentation/devicetree/bindings/spi/spi_nvidia.txt b/Documentation/devicetree/bindings/spi/spi_nvidia.txt
new file mode 100644 (file)
index 0000000..bde450b
--- /dev/null
@@ -0,0 +1,5 @@
+NVIDIA Tegra 2 SPI device
+
+Required properties:
+- compatible : should be "nvidia,tegra250-spi".
+- gpios : should specify GPIOs used for chipselect.
index 6325f5b..d8eb01c 100644 (file)
@@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void)
                            ARRAY_SIZE(ts72xx_spi_devices));
 }
 
+The driver can also use DMA for the transfers. In this case ts72xx_spi_info
+becomes:
+
+static struct ep93xx_spi_info ts72xx_spi_info = {
+       .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
+       .use_dma        = true,
+};
+
+Note that CONFIG_EP93XX_DMA should be enabled as well.
+
 Thanks to
 =========
 Martin Guy, H. Hartley Sweeten and others who helped me during development of
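
For reference, here is a minimal board-file sketch showing how the DMA-enabled
platform data above is wired up. This is illustrative only and not part of the
patch: the slave device entry is a placeholder rather than the one used in this
documentation file, while ep93xx_register_spi() is the existing EP93xx
registration helper declared in <mach/platform.h>.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <mach/ep93xx_spi.h>
#include <mach/platform.h>

/* Placeholder slave device; a real board would list its actual SPI devices. */
static struct spi_board_info ts72xx_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 10000000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

/* Enable DMA transfers; CONFIG_EP93XX_DMA must also be enabled. */
static struct ep93xx_spi_info ts72xx_spi_info = {
	.num_chipselect	= ARRAY_SIZE(ts72xx_spi_devices),
	.use_dma	= true,
};

static void __init ts72xx_init_machine(void)
{
	/* ... other board init ... */
	ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
			    ARRAY_SIZE(ts72xx_spi_devices));
}
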
index 33ee2c8..21e721a 100644 (file)
@@ -1,11 +1,13 @@
 #
 # Makefile for the linux kernel.
 #
-obj-y                  := core.o clock.o dma-m2p.o gpio.o
+obj-y                  := core.o clock.o gpio.o
 obj-m                  :=
 obj-n                  :=
 obj-                   :=
 
+obj-$(CONFIG_EP93XX_DMA)       += dma.o
+
 obj-$(CONFIG_MACH_ADSSPHERE)   += adssphere.o
 obj-$(CONFIG_MACH_EDB93XX)     += edb93xx.o
 obj-$(CONFIG_MACH_GESBC9312)   += gesbc9312.o
index 1d4b65f..ce07e0a 100644 (file)
@@ -492,11 +492,15 @@ static struct resource ep93xx_spi_resources[] = {
        },
 };
 
+static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device ep93xx_spi_device = {
        .name           = "ep93xx-spi",
        .id             = 0,
        .dev            = {
-               .platform_data = &ep93xx_spi_master_data,
+               .platform_data          = &ep93xx_spi_master_data,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .dma_mask               = &ep93xx_spi_dma_mask,
        },
        .num_resources  = ARRAY_SIZE(ep93xx_spi_resources),
        .resource       = ep93xx_spi_resources,
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644 (file)
index a696d35..0000000
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/dma-m2p.c
- * M2P DMA handling for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- *
- * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * On the EP93xx chip the following peripherals my be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- *     I2S     contains 3 Tx and 3 Rx DMA Channels
- *     AAC     contains 3 Tx and 3 Rx DMA Channels
- *     UART1   contains 1 Tx and 1 Rx DMA Channels
- *     UART2   contains 1 Tx and 1 Rx DMA Channels
- *     UART3   contains 1 Tx and 1 Rx DMA Channels
- *     IrDA    contains 1 Tx and 1 Rx DMA Channels
- *
- * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
- * with this implementation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
-
-#define M2P_CONTROL                    0x00
-#define  M2P_CONTROL_STALL_IRQ_EN      (1 << 0)
-#define  M2P_CONTROL_NFB_IRQ_EN                (1 << 1)
-#define  M2P_CONTROL_ERROR_IRQ_EN      (1 << 3)
-#define  M2P_CONTROL_ENABLE            (1 << 4)
-#define M2P_INTERRUPT                  0x04
-#define  M2P_INTERRUPT_STALL           (1 << 0)
-#define  M2P_INTERRUPT_NFB             (1 << 1)
-#define  M2P_INTERRUPT_ERROR           (1 << 3)
-#define M2P_PPALLOC                    0x08
-#define M2P_STATUS                     0x0c
-#define M2P_REMAIN                     0x14
-#define M2P_MAXCNT0                    0x20
-#define M2P_BASE0                      0x24
-#define M2P_MAXCNT1                    0x30
-#define M2P_BASE1                      0x34
-
-#define STATE_IDLE     0       /* Channel is inactive.  */
-#define STATE_STALL    1       /* Channel is active, no buffers pending.  */
-#define STATE_ON       2       /* Channel is active, one buffer pending.  */
-#define STATE_NEXT     3       /* Channel is active, two buffers pending.  */
-
-struct m2p_channel {
-       char                            *name;
-       void __iomem                    *base;
-       int                             irq;
-
-       struct clk                      *clk;
-       spinlock_t                      lock;
-
-       void                            *client;
-       unsigned                        next_slot:1;
-       struct ep93xx_dma_buffer        *buffer_xfer;
-       struct ep93xx_dma_buffer        *buffer_next;
-       struct list_head                buffers_pending;
-};
-
-static struct m2p_channel m2p_rx[] = {
-       {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
-       {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
-       {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
-       {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
-       {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
-       {NULL},
-};
-
-static struct m2p_channel m2p_tx[] = {
-       {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
-       {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
-       {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
-       {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
-       {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
-       {NULL},
-};
-
-static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
-{
-       if (ch->next_slot == 0) {
-               writel(buf->size, ch->base + M2P_MAXCNT0);
-               writel(buf->bus_addr, ch->base + M2P_BASE0);
-       } else {
-               writel(buf->size, ch->base + M2P_MAXCNT1);
-               writel(buf->bus_addr, ch->base + M2P_BASE1);
-       }
-       ch->next_slot ^= 1;
-}
-
-static void choose_buffer_xfer(struct m2p_channel *ch)
-{
-       struct ep93xx_dma_buffer *buf;
-
-       ch->buffer_xfer = NULL;
-       if (!list_empty(&ch->buffers_pending)) {
-               buf = list_entry(ch->buffers_pending.next,
-                                struct ep93xx_dma_buffer, list);
-               list_del(&buf->list);
-               feed_buf(ch, buf);
-               ch->buffer_xfer = buf;
-       }
-}
-
-static void choose_buffer_next(struct m2p_channel *ch)
-{
-       struct ep93xx_dma_buffer *buf;
-
-       ch->buffer_next = NULL;
-       if (!list_empty(&ch->buffers_pending)) {
-               buf = list_entry(ch->buffers_pending.next,
-                                struct ep93xx_dma_buffer, list);
-               list_del(&buf->list);
-               feed_buf(ch, buf);
-               ch->buffer_next = buf;
-       }
-}
-
-static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
-{
-       /*
-        * The control register must be read immediately after being written so
-        * that the internal state machine is correctly updated. See the ep93xx
-        * users' guide for details.
-        */
-       writel(v, ch->base + M2P_CONTROL);
-       readl(ch->base + M2P_CONTROL);
-}
-
-static inline int m2p_channel_state(struct m2p_channel *ch)
-{
-       return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
-}
-
-static irqreturn_t m2p_irq(int irq, void *dev_id)
-{
-       struct m2p_channel *ch = dev_id;
-       struct ep93xx_dma_m2p_client *cl;
-       u32 irq_status, v;
-       int error = 0;
-
-       cl = ch->client;
-
-       spin_lock(&ch->lock);
-       irq_status = readl(ch->base + M2P_INTERRUPT);
-
-       if (irq_status & M2P_INTERRUPT_ERROR) {
-               writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
-               error = 1;
-       }
-
-       if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
-               spin_unlock(&ch->lock);
-               return IRQ_NONE;
-       }
-
-       switch (m2p_channel_state(ch)) {
-       case STATE_IDLE:
-               pr_crit("dma interrupt without a dma buffer\n");
-               BUG();
-               break;
-
-       case STATE_STALL:
-               cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-               if (ch->buffer_next != NULL) {
-                       cl->buffer_finished(cl->cookie, ch->buffer_next,
-                                           0, error);
-               }
-               choose_buffer_xfer(ch);
-               choose_buffer_next(ch);
-               if (ch->buffer_xfer != NULL)
-                       cl->buffer_started(cl->cookie, ch->buffer_xfer);
-               break;
-
-       case STATE_ON:
-               cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
-               ch->buffer_xfer = ch->buffer_next;
-               choose_buffer_next(ch);
-               cl->buffer_started(cl->cookie, ch->buffer_xfer);
-               break;
-
-       case STATE_NEXT:
-               pr_crit("dma interrupt while next\n");
-               BUG();
-               break;
-       }
-
-       v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
-                                             M2P_CONTROL_NFB_IRQ_EN);
-       if (ch->buffer_xfer != NULL)
-               v |= M2P_CONTROL_STALL_IRQ_EN;
-       if (ch->buffer_next != NULL)
-               v |= M2P_CONTROL_NFB_IRQ_EN;
-       m2p_set_control(ch, v);
-
-       spin_unlock(&ch->lock);
-       return IRQ_HANDLED;
-}
-
-static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
-{
-       struct m2p_channel *ch;
-       int i;
-
-       if (cl->flags & EP93XX_DMA_M2P_RX)
-               ch = m2p_rx;
-       else
-               ch = m2p_tx;
-
-       for (i = 0; ch[i].base; i++) {
-               struct ep93xx_dma_m2p_client *client;
-
-               client = ch[i].client;
-               if (client != NULL) {
-                       int port;
-
-                       port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
-                       if (port == (client->flags &
-                                    EP93XX_DMA_M2P_PORT_MASK)) {
-                               pr_warning("DMA channel already used by %s\n",
-                                          cl->name ? : "unknown client");
-                               return ERR_PTR(-EBUSY);
-                       }
-               }
-       }
-
-       for (i = 0; ch[i].base; i++) {
-               if (ch[i].client == NULL)
-                       return ch + i;
-       }
-
-       pr_warning("No free DMA channel for %s\n",
-                  cl->name ? : "unknown client");
-       return ERR_PTR(-ENODEV);
-}
-
-static void channel_enable(struct m2p_channel *ch)
-{
-       struct ep93xx_dma_m2p_client *cl = ch->client;
-       u32 v;
-
-       clk_enable(ch->clk);
-
-       v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
-       writel(v, ch->base + M2P_PPALLOC);
-
-       v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
-       v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
-       m2p_set_control(ch, v);
-}
-
-static void channel_disable(struct m2p_channel *ch)
-{
-       u32 v;
-
-       v = readl(ch->base + M2P_CONTROL);
-       v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
-       m2p_set_control(ch, v);
-
-       while (m2p_channel_state(ch) >= STATE_ON)
-               cpu_relax();
-
-       m2p_set_control(ch, 0x0);
-
-       while (m2p_channel_state(ch) == STATE_STALL)
-               cpu_relax();
-
-       clk_disable(ch->clk);
-}
-
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
-{
-       struct m2p_channel *ch;
-       int err;
-
-       ch = find_free_channel(cl);
-       if (IS_ERR(ch))
-               return PTR_ERR(ch);
-
-       err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
-       if (err)
-               return err;
-
-       ch->client = cl;
-       ch->next_slot = 0;
-       ch->buffer_xfer = NULL;
-       ch->buffer_next = NULL;
-       INIT_LIST_HEAD(&ch->buffers_pending);
-
-       cl->channel = ch;
-
-       channel_enable(ch);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
-
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
-{
-       struct m2p_channel *ch = cl->channel;
-
-       channel_disable(ch);
-       free_irq(ch->irq, ch);
-       ch->client = NULL;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
-
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
-                          struct ep93xx_dma_buffer *buf)
-{
-       struct m2p_channel *ch = cl->channel;
-       unsigned long flags;
-       u32 v;
-
-       spin_lock_irqsave(&ch->lock, flags);
-       v = readl(ch->base + M2P_CONTROL);
-       if (ch->buffer_xfer == NULL) {
-               ch->buffer_xfer = buf;
-               feed_buf(ch, buf);
-               cl->buffer_started(cl->cookie, buf);
-
-               v |= M2P_CONTROL_STALL_IRQ_EN;
-               m2p_set_control(ch, v);
-
-       } else if (ch->buffer_next == NULL) {
-               ch->buffer_next = buf;
-               feed_buf(ch, buf);
-
-               v |= M2P_CONTROL_NFB_IRQ_EN;
-               m2p_set_control(ch, v);
-       } else {
-               list_add_tail(&buf->list, &ch->buffers_pending);
-       }
-       spin_unlock_irqrestore(&ch->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
-
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
-                                    struct ep93xx_dma_buffer *buf)
-{
-       struct m2p_channel *ch = cl->channel;
-
-       list_add_tail(&buf->list, &ch->buffers_pending);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
-
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
-{
-       struct m2p_channel *ch = cl->channel;
-
-       channel_disable(ch);
-       ch->next_slot = 0;
-       ch->buffer_xfer = NULL;
-       ch->buffer_next = NULL;
-       INIT_LIST_HEAD(&ch->buffers_pending);
-       channel_enable(ch);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
-
-static int init_channel(struct m2p_channel *ch)
-{
-       ch->clk = clk_get(NULL, ch->name);
-       if (IS_ERR(ch->clk))
-               return PTR_ERR(ch->clk);
-
-       spin_lock_init(&ch->lock);
-       ch->client = NULL;
-
-       return 0;
-}
-
-static int __init ep93xx_dma_m2p_init(void)
-{
-       int i;
-       int ret;
-
-       for (i = 0; m2p_rx[i].base; i++) {
-               ret = init_channel(m2p_rx + i);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; m2p_tx[i].base; i++) {
-               ret = init_channel(m2p_tx + i);
-               if (ret)
-                       return ret;
-       }
-
-       pr_info("M2P DMA subsystem initialized\n");
-       return 0;
-}
-arch_initcall(ep93xx_dma_m2p_init);
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644 (file)
index 0000000..5a25708
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * arch/arm/mach-ep93xx/dma.c
+ *
+ * Platform support code for the EP93xx dmaengine driver.
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * This work is based on the original dma-m2p implementation with
+ * following copyrights:
+ *
+ *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *   Copyright (C) 2006 Applied Data Systems
+ *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+#define DMA_CHANNEL(_name, _base, _irq) \
+       { .name = (_name), .base = (_base), .irq = (_irq) }
+
+/*
+ * DMA M2P channels.
+ *
+ * On the EP93xx chip the following peripherals may be allocated to the 10
+ * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
+ *
+ *     I2S     contains 3 Tx and 3 Rx DMA Channels
+ *     AAC     contains 3 Tx and 3 Rx DMA Channels
+ *     UART1   contains 1 Tx and 1 Rx DMA Channels
+ *     UART2   contains 1 Tx and 1 Rx DMA Channels
+ *     UART3   contains 1 Tx and 1 Rx DMA Channels
+ *     IrDA    contains 1 Tx and 1 Rx DMA Channels
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
+       DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
+       DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
+       DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
+       DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
+       DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
+       DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
+       DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
+       DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
+       DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
+       DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
+       .channels               = ep93xx_dma_m2p_channels,
+       .num_channels           = ARRAY_SIZE(ep93xx_dma_m2p_channels),
+};
+
+static struct platform_device ep93xx_dma_m2p_device = {
+       .name                   = "ep93xx-dma-m2p",
+       .id                     = -1,
+       .dev                    = {
+               .platform_data  = &ep93xx_dma_m2p_data,
+       },
+};
+
+/*
+ * DMA M2M channels.
+ *
+ * There are 2 M2M channels which support memcpy/memset and in addition simple
+ * hardware requests from/to SSP and IDE. We do not implement external
+ * hardware requests.
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
+       DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
+       DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
+       .channels               = ep93xx_dma_m2m_channels,
+       .num_channels           = ARRAY_SIZE(ep93xx_dma_m2m_channels),
+};
+
+static struct platform_device ep93xx_dma_m2m_device = {
+       .name                   = "ep93xx-dma-m2m",
+       .id                     = -1,
+       .dev                    = {
+               .platform_data  = &ep93xx_dma_m2m_data,
+       },
+};
+
+static int __init ep93xx_dma_init(void)
+{
+       platform_device_register(&ep93xx_dma_m2p_device);
+       platform_device_register(&ep93xx_dma_m2m_device);
+       return 0;
+}
+arch_initcall(ep93xx_dma_init);
index 5e31b2b..46d4d87 100644 (file)
-/**
- * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
- *
- * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
- * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
- * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
- * engine.
- *
- * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
- *
- */
-
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H
 
-#include <linux/list.h>
 #include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 
-/**
- * struct ep93xx_dma_buffer - Information about a buffer to be transferred
- * using the DMA M2P engine
+/*
+ * M2P channels.
  *
- * @list: Entry in DMA buffer list
- * @bus_addr: Physical address of the buffer
- * @size: Size of the buffer in bytes
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
  */
-struct ep93xx_dma_buffer {
-       struct list_head        list;
-       u32                     bus_addr;
-       u16                     size;
-};
+#define EP93XX_DMA_I2S1                0
+#define EP93XX_DMA_I2S2                1
+#define EP93XX_DMA_AAC1                2
+#define EP93XX_DMA_AAC2                3
+#define EP93XX_DMA_AAC3                4
+#define EP93XX_DMA_I2S3                5
+#define EP93XX_DMA_UART1       6
+#define EP93XX_DMA_UART2       7
+#define EP93XX_DMA_UART3       8
+#define EP93XX_DMA_IRDA                9
+/* M2M channels */
+#define EP93XX_DMA_SSP         10
+#define EP93XX_DMA_IDE         11
 
 /**
- * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
- *
- * @name: Unique name for this client
- * @flags: Client flags
- * @cookie: User data to pass to callback functions
- * @buffer_started: Non NULL function to call when a transfer is started.
- *                     The arguments are the user data cookie and the DMA
- *                     buffer which is starting.
- * @buffer_finished: Non NULL function to call when a transfer is completed.
- *                     The arguments are the user data cookie, the DMA buffer
- *                     which has completed, and a boolean flag indicating if
- *                     the transfer had an error.
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel, this is displayed in /proc/interrupts
+ *
+ * This information is passed as the private channel parameter in a filter
+ * function. Note that this is only needed for slave/cyclic channels.  For
+ * memcpy channels %NULL data should be passed.
  */
-struct ep93xx_dma_m2p_client {
-       char                    *name;
-       u8                      flags;
-       void                    *cookie;
-       void                    (*buffer_started)(void *cookie,
-                                       struct ep93xx_dma_buffer *buf);
-       void                    (*buffer_finished)(void *cookie,
-                                       struct ep93xx_dma_buffer *buf,
-                                       int bytes, int error);
-
-       /* private: Internal use only */
-       void                    *channel;
+struct ep93xx_dma_data {
+       int                             port;
+       enum dma_data_direction         direction;
+       const char                      *name;
 };
 
-/* DMA M2P ports */
-#define EP93XX_DMA_M2P_PORT_I2S1       0x00
-#define EP93XX_DMA_M2P_PORT_I2S2       0x01
-#define EP93XX_DMA_M2P_PORT_AAC1       0x02
-#define EP93XX_DMA_M2P_PORT_AAC2       0x03
-#define EP93XX_DMA_M2P_PORT_AAC3       0x04
-#define EP93XX_DMA_M2P_PORT_I2S3       0x05
-#define EP93XX_DMA_M2P_PORT_UART1      0x06
-#define EP93XX_DMA_M2P_PORT_UART2      0x07
-#define EP93XX_DMA_M2P_PORT_UART3      0x08
-#define EP93XX_DMA_M2P_PORT_IRDA       0x09
-#define EP93XX_DMA_M2P_PORT_MASK       0x0f
-
-/* DMA M2P client flags */
-#define EP93XX_DMA_M2P_TX              0x00    /* Memory to peripheral */
-#define EP93XX_DMA_M2P_RX              0x10    /* Peripheral to memory */
-
-/*
- * DMA M2P client error handling flags. See the EP93xx users guide
- * documentation on the DMA M2P CONTROL register for more details
- */
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR  0x20    /* Abort on peripheral error */
-#define EP93XX_DMA_M2P_IGNORE_ERROR    0x40    /* Ignore peripheral errors */
-#define EP93XX_DMA_M2P_ERROR_MASK      0x60    /* Mask of error bits */
-
 /**
- * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
- * subsystem
- *
- * @m2p: Client information to register
- * returns 0 on success
- *
- * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
- * client
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used for getting the right clock for the channel
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
  */
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_chan_data {
+       const char                      *name;
+       void __iomem                    *base;
+       int                             irq;
+};
 
 /**
- * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
- * subsystem
- *
- * @m2p: Client to unregister
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
  *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, the contract is that even channels are for TX and odd for
+ * RX. There is no such requirement for the M2M channels.
  */
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_platform_data {
+       struct ep93xx_dma_chan_data     *channels;
+       size_t                          num_channels;
+};
 
-/**
- * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
- *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * If the current or next transfer positions are free on the M2P client then
- * the transfer is started immediately. If not, the transfer is added to the
- * list of pending transfers. This function must not be called from the
- * buffer_finished callback for an M2P channel.
- *
- */
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
-                          struct ep93xx_dma_buffer *buf);
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+       return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
 
 /**
- * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
- * for an M2P channel
+ * ep93xx_dma_chan_direction - returns direction the channel can be used
+ * @chan: channel
  *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * This function must only be called from the buffer_finished callback for an
- * M2P channel. It is commonly used to add the next transfer in a chained list
- * of DMA transfers.
+ * This function can be used in filter functions to find out whether the
+ * channel supports a given DMA direction. Only M2P channels have such a
+ * limitation; for M2M channels the direction is configurable.
  */
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
-                                    struct ep93xx_dma_buffer *buf);
+static inline enum dma_data_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+       if (!ep93xx_dma_chan_is_m2p(chan))
+               return DMA_NONE;
 
-/**
- * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
- *
- * @m2p: DMA client to flush transfers on
- *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
- *
- */
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
+       /* even channels are for TX, odd for RX */
+       return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
 
 #endif /* __ASM_ARCH_DMA_H */
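
To make the client-side usage of this new header concrete, here is a minimal
sketch of a dmaengine filter function and channel request built on the helpers
above. It follows the pattern described in the kernel-doc for struct
ep93xx_dma_data; the function names, the EP93XX_DMA_AAC1 port and the
"example tx" label are illustrative assumptions, not code from this merge.

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool ep93xx_example_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* Accept only M2P channels that transfer in the requested direction. */
	if (!ep93xx_dma_chan_is_m2p(chan))
		return false;
	if (ep93xx_dma_chan_direction(chan) != data->direction)
		return false;

	/* The driver picks this up when the channel resources are allocated. */
	chan->private = data;
	return true;
}

static struct dma_chan *ep93xx_example_request_tx_chan(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_AAC1,
		.direction	= DMA_TO_DEVICE,
		.name		= "example tx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, ep93xx_example_dma_filter, &data);
}

Once a channel has been obtained this way, it is typically configured with
dmaengine_slave_config() before slave transfers are prepared and submitted.
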
index 0a37961..9bb63ac 100644 (file)
@@ -7,9 +7,11 @@ struct spi_device;
  * struct ep93xx_spi_info - EP93xx specific SPI descriptor
  * @num_chipselect: number of chip selects on this board, must be
  *                  at least one
+ * @use_dma: use DMA for the transfers
  */
 struct ep93xx_spi_info {
        int     num_chipselect;
+       bool    use_dma;
 };
 
 /**
index 25cf327..2e3b3d3 100644 (file)
@@ -237,6 +237,13 @@ config MXS_DMA
          Support the MXS DMA engine. This engine including APBH-DMA
          and APBX-DMA is integrated into Freescale i.MX23/28 chips.
 
+config EP93XX_DMA
+       bool "Cirrus Logic EP93xx DMA support"
+       depends on ARCH_EP93XX
+       select DMA_ENGINE
+       help
+         Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
 config DMA_ENGINE
        bool
 
index 836095a..30cf3b1 100644 (file)
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644 (file)
index 0000000..0766c1e
--- /dev/null
@@ -0,0 +1,1355 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *   Copyright (C) 2006 Applied Data Systems
+ *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+
+/* M2P registers */
+#define M2P_CONTROL                    0x0000
+#define M2P_CONTROL_STALLINT           BIT(0)
+#define M2P_CONTROL_NFBINT             BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT       BIT(3)
+#define M2P_CONTROL_ENABLE             BIT(4)
+#define M2P_CONTROL_ICE                        BIT(6)
+
+#define M2P_INTERRUPT                  0x0004
+#define M2P_INTERRUPT_STALL            BIT(0)
+#define M2P_INTERRUPT_NFB              BIT(1)
+#define M2P_INTERRUPT_ERROR            BIT(3)
+
+#define M2P_PPALLOC                    0x0008
+#define M2P_STATUS                     0x000c
+
+#define M2P_MAXCNT0                    0x0020
+#define M2P_BASE0                      0x0024
+#define M2P_MAXCNT1                    0x0030
+#define M2P_BASE1                      0x0034
+
+#define M2P_STATE_IDLE                 0
+#define M2P_STATE_STALL                        1
+#define M2P_STATE_ON                   2
+#define M2P_STATE_NEXT                 3
+
+/* M2M registers */
+#define M2M_CONTROL                    0x0000
+#define M2M_CONTROL_DONEINT            BIT(2)
+#define M2M_CONTROL_ENABLE             BIT(3)
+#define M2M_CONTROL_START              BIT(4)
+#define M2M_CONTROL_DAH                        BIT(11)
+#define M2M_CONTROL_SAH                        BIT(12)
+#define M2M_CONTROL_PW_SHIFT           9
+#define M2M_CONTROL_PW_8               (0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16              (1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32              (2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK            (3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT           13
+#define M2M_CONTROL_TM_TX              (1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX              (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_RSS_SHIFT          22
+#define M2M_CONTROL_RSS_SSPRX          (1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX          (2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE            (3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK            BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT         25
+
+#define M2M_INTERRUPT                  0x0004
+#define M2M_INTERRUPT_DONEINT          BIT(1)
+
+#define M2M_BCR0                       0x0010
+#define M2M_BCR1                       0x0014
+#define M2M_SAR_BASE0                  0x0018
+#define M2M_SAR_BASE1                  0x001c
+#define M2M_DAR_BASE0                  0x002c
+#define M2M_DAR_BASE1                  0x0030
+
+#define DMA_MAX_CHAN_BYTES             0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS       32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+       u32                             src_addr;
+       u32                             dst_addr;
+       size_t                          size;
+       bool                            complete;
+       struct dma_async_tx_descriptor  txd;
+       struct list_head                tx_list;
+       struct list_head                node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @last_completed: last completed cookie value
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ *                is set via %DMA_SLAVE_CONFIG before the slave operation is
+ *                prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
+ * we use a slightly different scheme here: @active points to the head of a
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
+ * necessary channel configuration information. For memcpy channels this must
+ * be %NULL.
+ */
+struct ep93xx_dma_chan {
+       struct dma_chan                 chan;
+       const struct ep93xx_dma_engine  *edma;
+       void __iomem                    *regs;
+       int                             irq;
+       struct clk                      *clk;
+       struct tasklet_struct           tasklet;
+       /* protects the fields following */
+       spinlock_t                      lock;
+       unsigned long                   flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC           0
+
+       int                             buffer;
+       dma_cookie_t                    last_completed;
+       struct list_head                active;
+       struct list_head                queue;
+       struct list_head                free_list;
+       u32                             runtime_addr;
+       u32                             runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with channel
+ * lock held and interrupts disabled so they cannot sleep.
+ */
+struct ep93xx_dma_engine {
+       struct dma_device       dma_dev;
+       bool                    m2m;
+       int                     (*hw_setup)(struct ep93xx_dma_chan *);
+       void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
+       void                    (*hw_submit)(struct ep93xx_dma_chan *);
+       int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN      0
+#define INTERRUPT_DONE         1
+#define INTERRUPT_NEXT_BUFFER  2
+
+       size_t                  num_channels;
+       struct ep93xx_dma_chan  channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+       return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+                                 struct ep93xx_dma_desc *desc)
+{
+       BUG_ON(!list_empty(&edmac->active));
+
+       list_add_tail(&desc->node, &edmac->active);
+
+       /* Flatten the @desc->tx_list chain into @edmac->active list */
+       while (!list_empty(&desc->tx_list)) {
+               struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+                       struct ep93xx_dma_desc, node);
+
+               /*
+                * We copy the callback parameters from the first descriptor
+                * to all the chained descriptors. This way we can call the
+                * callback without having to find out the first descriptor in
+                * the chain. Useful for cyclic transfers.
+                */
+               d->txd.callback = desc->txd.callback;
+               d->txd.callback_param = desc->txd.callback_param;
+
+               list_move_tail(&d->node, &edmac->active);
+       }
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+       return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Advances the active descriptor to the next one in @edmac->active and
+ * returns %true if there are still descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+       list_rotate_left(&edmac->active);
+
+       if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+               return true;
+
+       /*
+        * If txd.cookie is set it means that we are back in the first
+        * descriptor in the chain and hence done with it.
+        */
+       return !ep93xx_dma_get_active(edmac)->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+       writel(control, edmac->regs + M2P_CONTROL);
+       /*
+        * EP93xx User's Guide states that we must perform a dummy read after
+        * a write to the control register.
+        */
+       readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_data *data = edmac->chan.private;
+       u32 control;
+
+       writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+       control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+               | M2P_CONTROL_ENABLE;
+       m2p_set_control(edmac, control);
+
+       return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+       return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+       u32 control;
+
+       control = readl(edmac->regs + M2P_CONTROL);
+       control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+       m2p_set_control(edmac, control);
+
+       while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+               cpu_relax();
+
+       m2p_set_control(edmac, 0);
+
+       while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+               cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       u32 bus_addr;
+
+       if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+               bus_addr = desc->src_addr;
+       else
+               bus_addr = desc->dst_addr;
+
+       if (edmac->buffer == 0) {
+               writel(desc->size, edmac->regs + M2P_MAXCNT0);
+               writel(bus_addr, edmac->regs + M2P_BASE0);
+       } else {
+               writel(desc->size, edmac->regs + M2P_MAXCNT1);
+               writel(bus_addr, edmac->regs + M2P_BASE1);
+       }
+
+       edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+       u32 control = readl(edmac->regs + M2P_CONTROL);
+
+       m2p_fill_desc(edmac);
+       control |= M2P_CONTROL_STALLINT;
+
+       if (ep93xx_dma_advance_active(edmac)) {
+               m2p_fill_desc(edmac);
+               control |= M2P_CONTROL_NFBINT;
+       }
+
+       m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+       u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+       u32 control;
+
+       if (irq_status & M2P_INTERRUPT_ERROR) {
+               struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+               /* Clear the error interrupt */
+               writel(1, edmac->regs + M2P_INTERRUPT);
+
+               /*
+                * It seems that there is no easy way of reporting errors back
+                * to the client, so we just report the error here and continue as
+                * usual.
+                *
+                * Revisit this when there is a mechanism to report back the
+                * errors.
+                */
+               dev_err(chan2dev(edmac),
+                       "DMA transfer failed! Details:\n"
+                       "\tcookie       : %d\n"
+                       "\tsrc_addr     : 0x%08x\n"
+                       "\tdst_addr     : 0x%08x\n"
+                       "\tsize         : %zu\n",
+                       desc->txd.cookie, desc->src_addr, desc->dst_addr,
+                       desc->size);
+       }
+
+       switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+       case M2P_INTERRUPT_STALL:
+               /* Disable interrupts */
+               control = readl(edmac->regs + M2P_CONTROL);
+               control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+               m2p_set_control(edmac, control);
+
+               return INTERRUPT_DONE;
+
+       case M2P_INTERRUPT_NFB:
+               if (ep93xx_dma_advance_active(edmac))
+                       m2p_fill_desc(edmac);
+
+               return INTERRUPT_NEXT_BUFFER;
+       }
+
+       return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ *
+ * For the M2M transfers we don't use NFB at all. This is because it simply
+ * doesn't work well with memcpy transfers. When you submit both buffers it is
+ * extremely unlikely that you get an NFB interrupt; instead the controller
+ * reports a DONE interrupt with both buffers already transferred, which means
+ * that we weren't able to update the next buffer.
+ *
+ * So for now we "simulate" NFB by just submitting buffer after buffer
+ * without double buffering.
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+       const struct ep93xx_dma_data *data = edmac->chan.private;
+       u32 control = 0;
+
+       if (!data) {
+               /* This is memcpy channel, nothing to configure */
+               writel(control, edmac->regs + M2M_CONTROL);
+               return 0;
+       }
+
+       switch (data->port) {
+       case EP93XX_DMA_SSP:
+               /*
+                * This was found by experimentation - anything less than 5
+                * causes the channel to perform only a partial transfer which
+                * leads to problems since we don't get a DONE interrupt then.
+                */
+               control = (5 << M2M_CONTROL_PWSC_SHIFT);
+               control |= M2M_CONTROL_NO_HDSK;
+
+               if (data->direction == DMA_TO_DEVICE) {
+                       control |= M2M_CONTROL_DAH;
+                       control |= M2M_CONTROL_TM_TX;
+                       control |= M2M_CONTROL_RSS_SSPTX;
+               } else {
+                       control |= M2M_CONTROL_SAH;
+                       control |= M2M_CONTROL_TM_RX;
+                       control |= M2M_CONTROL_RSS_SSPRX;
+               }
+               break;
+
+       case EP93XX_DMA_IDE:
+               /*
+                * This IDE part is totally untested. Values below are taken
+                * from the EP93xx User's Guide and might not be correct.
+                */
+               control |= M2M_CONTROL_NO_HDSK;
+               control |= M2M_CONTROL_RSS_IDE;
+               control |= M2M_CONTROL_PW_16;
+
+               if (data->direction == DMA_TO_DEVICE) {
+                       /* Worst case from the UG */
+                       control = (3 << M2M_CONTROL_PWSC_SHIFT);
+                       control |= M2M_CONTROL_DAH;
+                       control |= M2M_CONTROL_TM_TX;
+               } else {
+                       control = (2 << M2M_CONTROL_PWSC_SHIFT);
+                       control |= M2M_CONTROL_SAH;
+                       control |= M2M_CONTROL_TM_RX;
+               }
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       writel(control, edmac->regs + M2M_CONTROL);
+       return 0;
+}
+
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+       /* Just disable the channel */
+       writel(0, edmac->regs + M2M_CONTROL);
+}
+
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+       if (edmac->buffer == 0) {
+               writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
+               writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
+               writel(desc->size, edmac->regs + M2M_BCR0);
+       } else {
+               writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
+               writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
+               writel(desc->size, edmac->regs + M2M_BCR1);
+       }
+
+       edmac->buffer ^= 1;
+}
+
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_data *data = edmac->chan.private;
+       u32 control = readl(edmac->regs + M2M_CONTROL);
+
+       /*
+        * Since we allow clients to configure PW (peripheral width) we always
+        * clear PW bits here and then set them according to what is given in
+        * the runtime configuration.
+        */
+       control &= ~M2M_CONTROL_PW_MASK;
+       control |= edmac->runtime_ctrl;
+
+       m2m_fill_desc(edmac);
+       control |= M2M_CONTROL_DONEINT;
+
+       /*
+        * Now we can finally enable the channel. For M2M channel this must be
+        * done _after_ the BCRx registers are programmed.
+        */
+       control |= M2M_CONTROL_ENABLE;
+       writel(control, edmac->regs + M2M_CONTROL);
+
+       if (!data) {
+               /*
+                * For memcpy channels the software trigger must be asserted
+                * in order to start the memcpy operation.
+                */
+               control |= M2M_CONTROL_START;
+               writel(control, edmac->regs + M2M_CONTROL);
+       }
+}
+
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+       u32 control;
+
+       if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+               return INTERRUPT_UNKNOWN;
+
+       /* Clear the DONE bit */
+       writel(0, edmac->regs + M2M_INTERRUPT);
+
+       /* Disable interrupts and the channel */
+       control = readl(edmac->regs + M2M_CONTROL);
+       control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
+       writel(control, edmac->regs + M2M_CONTROL);
+
+       /*
+        * Since we only get a DONE interrupt we have to find out ourselves
+        * whether there is still something to process. So we try to advance
+        * the chain and see whether it succeeds.
+        */
+       if (ep93xx_dma_advance_active(edmac)) {
+               edmac->edma->hw_submit(edmac);
+               return INTERRUPT_NEXT_BUFFER;
+       }
+
+       return INTERRUPT_DONE;
+}
+
+/*
+ * DMA engine API implementation
+ */
+
+static struct ep93xx_dma_desc *
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_desc *desc, *_desc;
+       struct ep93xx_dma_desc *ret = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
+               if (async_tx_test_ack(&desc->txd)) {
+                       list_del_init(&desc->node);
+
+                       /* Re-initialize the descriptor */
+                       desc->src_addr = 0;
+                       desc->dst_addr = 0;
+                       desc->size = 0;
+                       desc->complete = false;
+                       desc->txd.cookie = 0;
+                       desc->txd.callback = NULL;
+                       desc->txd.callback_param = NULL;
+
+                       ret = desc;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&edmac->lock, flags);
+       return ret;
+}
+
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
+                               struct ep93xx_dma_desc *desc)
+{
+       if (desc) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&edmac->lock, flags);
+               list_splice_init(&desc->tx_list, &edmac->free_list);
+               list_add(&desc->node, &edmac->free_list);
+               spin_unlock_irqrestore(&edmac->lock, flags);
+       }
+}
+
+/**
+ * ep93xx_dma_advance_work - start processing the next pending transaction
+ * @edmac: channel
+ *
+ * If we have pending transactions queued and we are currently idling, this
+ * function takes the next queued transaction from the @edmac->queue and
+ * pushes it to the hardware for execution.
+ */
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_desc *new;
+       unsigned long flags;
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
+               spin_unlock_irqrestore(&edmac->lock, flags);
+               return;
+       }
+
+       /* Take the next descriptor from the pending queue */
+       new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
+       list_del_init(&new->node);
+
+       ep93xx_dma_set_active(edmac, new);
+
+       /* Push it to the hardware */
+       edmac->edma->hw_submit(edmac);
+       spin_unlock_irqrestore(&edmac->lock, flags);
+}
+
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
+{
+       struct device *dev = desc->txd.chan->device->dev;
+
+       if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+               if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                       dma_unmap_single(dev, desc->src_addr, desc->size,
+                                        DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dev, desc->src_addr, desc->size,
+                                      DMA_TO_DEVICE);
+       }
+       if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+               if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                       dma_unmap_single(dev, desc->dst_addr, desc->size,
+                                        DMA_FROM_DEVICE);
+               else
+                       dma_unmap_page(dev, desc->dst_addr, desc->size,
+                                      DMA_FROM_DEVICE);
+       }
+}
+
+static void ep93xx_dma_tasklet(unsigned long data)
+{
+       struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+       struct ep93xx_dma_desc *desc, *d;
+       dma_async_tx_callback callback;
+       void *callback_param;
+       LIST_HEAD(list);
+
+       spin_lock_irq(&edmac->lock);
+       desc = ep93xx_dma_get_active(edmac);
+       if (desc->complete) {
+               edmac->last_completed = desc->txd.cookie;
+               list_splice_init(&edmac->active, &list);
+       }
+       spin_unlock_irq(&edmac->lock);
+
+       /* Pick up the next descriptor from the queue */
+       ep93xx_dma_advance_work(edmac);
+
+       callback = desc->txd.callback;
+       callback_param = desc->txd.callback_param;
+
+       /* Now we can release all the chained descriptors */
+       list_for_each_entry_safe(desc, d, &list, node) {
+               /*
+                * For the memcpy channels the API requires us to unmap the
+                * buffers unless requested otherwise.
+                */
+               if (!edmac->chan.private)
+                       ep93xx_dma_unmap_buffers(desc);
+
+               ep93xx_dma_desc_put(edmac, desc);
+       }
+
+       if (callback)
+               callback(callback_param);
+}
+
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
+{
+       struct ep93xx_dma_chan *edmac = dev_id;
+       irqreturn_t ret = IRQ_HANDLED;
+
+       spin_lock(&edmac->lock);
+
+       switch (edmac->edma->hw_interrupt(edmac)) {
+       case INTERRUPT_DONE:
+               ep93xx_dma_get_active(edmac)->complete = true;
+               tasklet_schedule(&edmac->tasklet);
+               break;
+
+       case INTERRUPT_NEXT_BUFFER:
+               if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+                       tasklet_schedule(&edmac->tasklet);
+               break;
+
+       default:
+               dev_warn(chan2dev(edmac), "unknown interrupt!\n");
+               ret = IRQ_NONE;
+               break;
+       }
+
+       spin_unlock(&edmac->lock);
+       return ret;
+}
+
+/**
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
+ * @tx: descriptor to be executed
+ *
+ * Function will execute the given descriptor on the hardware or, if the
+ * hardware is busy, queue the descriptor to be executed later on. Returns a
+ * cookie which can be used to poll the status of the descriptor.
+ */
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
+       struct ep93xx_dma_desc *desc;
+       dma_cookie_t cookie;
+       unsigned long flags;
+
+       spin_lock_irqsave(&edmac->lock, flags);
+
+       cookie = edmac->chan.cookie;
+
+       if (++cookie < 0)
+               cookie = 1;
+
+       desc = container_of(tx, struct ep93xx_dma_desc, txd);
+
+       edmac->chan.cookie = cookie;
+       desc->txd.cookie = cookie;
+
+       /*
+        * If nothing is currently being processed, we push this descriptor
+        * directly to the hardware. Otherwise we put the descriptor into
+        * the pending queue.
+        */
+       if (list_empty(&edmac->active)) {
+               ep93xx_dma_set_active(edmac, desc);
+               edmac->edma->hw_submit(edmac);
+       } else {
+               list_add_tail(&desc->node, &edmac->queue);
+       }
+
+       spin_unlock_irqrestore(&edmac->lock, flags);
+       return cookie;
+}
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Function allocates the necessary resources for the given DMA channel and
+ * returns the number of allocated descriptors for the channel. A negative
+ * errno is returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct ep93xx_dma_data *data = chan->private;
+       const char *name = dma_chan_name(chan);
+       int ret, i;
+
+       /* Sanity check the channel parameters */
+       if (!edmac->edma->m2m) {
+               if (!data)
+                       return -EINVAL;
+               if (data->port < EP93XX_DMA_I2S1 ||
+                   data->port > EP93XX_DMA_IRDA)
+                       return -EINVAL;
+               if (data->direction != ep93xx_dma_chan_direction(chan))
+                       return -EINVAL;
+       } else {
+               if (data) {
+                       switch (data->port) {
+                       case EP93XX_DMA_SSP:
+                       case EP93XX_DMA_IDE:
+                               if (data->direction != DMA_TO_DEVICE &&
+                                   data->direction != DMA_FROM_DEVICE)
+                                       return -EINVAL;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       if (data && data->name)
+               name = data->name;
+
+       ret = clk_enable(edmac->clk);
+       if (ret)
+               return ret;
+
+       ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+       if (ret)
+               goto fail_clk_disable;
+
+       spin_lock_irq(&edmac->lock);
+       edmac->last_completed = 1;
+       edmac->chan.cookie = 1;
+       ret = edmac->edma->hw_setup(edmac);
+       spin_unlock_irq(&edmac->lock);
+
+       if (ret)
+               goto fail_free_irq;
+
+       for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+               struct ep93xx_dma_desc *desc;
+
+               desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+               if (!desc) {
+                       dev_warn(chan2dev(edmac), "not enough descriptors\n");
+                       break;
+               }
+
+               INIT_LIST_HEAD(&desc->tx_list);
+
+               dma_async_tx_descriptor_init(&desc->txd, chan);
+               desc->txd.flags = DMA_CTRL_ACK;
+               desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+               ep93xx_dma_desc_put(edmac, desc);
+       }
+
+       return i;
+
+fail_free_irq:
+       free_irq(edmac->irq, edmac);
+fail_clk_disable:
+       clk_disable(edmac->clk);
+
+       return ret;
+}
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct ep93xx_dma_desc *desc, *d;
+       unsigned long flags;
+       LIST_HEAD(list);
+
+       BUG_ON(!list_empty(&edmac->active));
+       BUG_ON(!list_empty(&edmac->queue));
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       edmac->edma->hw_shutdown(edmac);
+       edmac->runtime_addr = 0;
+       edmac->runtime_ctrl = 0;
+       edmac->buffer = 0;
+       list_splice_init(&edmac->free_list, &list);
+       spin_unlock_irqrestore(&edmac->lock, flags);
+
+       list_for_each_entry_safe(desc, d, &list, node)
+               kfree(desc);
+
+       clk_disable(edmac->clk);
+       free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+                          dma_addr_t src, size_t len, unsigned long flags)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct ep93xx_dma_desc *desc, *first;
+       size_t bytes, offset;
+
+       first = NULL;
+       for (offset = 0; offset < len; offset += bytes) {
+               desc = ep93xx_dma_desc_get(edmac);
+               if (!desc) {
+                       dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+                       goto fail;
+               }
+
+               bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+               desc->src_addr = src + offset;
+               desc->dst_addr = dest + offset;
+               desc->size = bytes;
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->node, &first->tx_list);
+       }
+
+       first->txd.cookie = -EBUSY;
+       first->txd.flags = flags;
+
+       return &first->txd;
+fail:
+       ep93xx_dma_desc_put(edmac, first);
+       return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                        unsigned int sg_len, enum dma_data_direction dir,
+                        unsigned long flags)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct ep93xx_dma_desc *desc, *first;
+       struct scatterlist *sg;
+       int i;
+
+       if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+               dev_warn(chan2dev(edmac),
+                        "channel was configured with different direction\n");
+               return NULL;
+       }
+
+       if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+               dev_warn(chan2dev(edmac),
+                        "channel is already used for cyclic transfers\n");
+               return NULL;
+       }
+
+       first = NULL;
+       for_each_sg(sgl, sg, sg_len, i) {
+               size_t sg_len = sg_dma_len(sg);
+
+               if (sg_len > DMA_MAX_CHAN_BYTES) {
+                       dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+                                sg_len);
+                       goto fail;
+               }
+
+               desc = ep93xx_dma_desc_get(edmac);
+               if (!desc) {
+                       dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+                       goto fail;
+               }
+
+               if (dir == DMA_TO_DEVICE) {
+                       desc->src_addr = sg_dma_address(sg);
+                       desc->dst_addr = edmac->runtime_addr;
+               } else {
+                       desc->src_addr = edmac->runtime_addr;
+                       desc->dst_addr = sg_dma_address(sg);
+               }
+               desc->size = sg_len;
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->node, &first->tx_list);
+       }
+
+       first->txd.cookie = -EBUSY;
+       first->txd.flags = flags;
+
+       return &first->txd;
+
+fail:
+       ep93xx_dma_desc_put(edmac, first);
+       return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period
+ * @dir: direction of the operation
+ *
+ * Prepares a descriptor for a cyclic DMA operation. This means that once the
+ * descriptor is submitted, we will be submitting @period_len sized buffers
+ * and calling the callback once a period has elapsed. The transfer terminates
+ * only when the client calls dmaengine_terminate_all() for this channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+                          size_t buf_len, size_t period_len,
+                          enum dma_data_direction dir)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct ep93xx_dma_desc *desc, *first;
+       size_t offset = 0;
+
+       if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+               dev_warn(chan2dev(edmac),
+                        "channel was configured with different direction\n");
+               return NULL;
+       }
+
+       if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+               dev_warn(chan2dev(edmac),
+                        "channel is already used for cyclic transfers\n");
+               return NULL;
+       }
+
+       if (period_len > DMA_MAX_CHAN_BYTES) {
+               dev_warn(chan2dev(edmac), "too big period length %zu\n",
+                        period_len);
+               return NULL;
+       }
+
+       /* Split the buffer into period size chunks */
+       first = NULL;
+       for (offset = 0; offset < buf_len; offset += period_len) {
+               desc = ep93xx_dma_desc_get(edmac);
+               if (!desc) {
+                       dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+                       goto fail;
+               }
+
+               if (dir == DMA_TO_DEVICE) {
+                       desc->src_addr = dma_addr + offset;
+                       desc->dst_addr = edmac->runtime_addr;
+               } else {
+                       desc->src_addr = edmac->runtime_addr;
+                       desc->dst_addr = dma_addr + offset;
+               }
+
+               desc->size = period_len;
+
+               if (!first)
+                       first = desc;
+               else
+                       list_add_tail(&desc->node, &first->tx_list);
+       }
+
+       first->txd.cookie = -EBUSY;
+
+       return &first->txd;
+
+fail:
+       ep93xx_dma_desc_put(edmac, first);
+       return NULL;
+}
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back to the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+       struct ep93xx_dma_desc *desc, *_d;
+       unsigned long flags;
+       LIST_HEAD(list);
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       /* First we disable and flush the DMA channel */
+       edmac->edma->hw_shutdown(edmac);
+       clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+       list_splice_init(&edmac->active, &list);
+       list_splice_init(&edmac->queue, &list);
+       /*
+        * We then re-enable the channel. This way we can continue submitting
+        * the descriptors by just calling ->hw_submit() again.
+        */
+       edmac->edma->hw_setup(edmac);
+       spin_unlock_irqrestore(&edmac->lock, flags);
+
+       list_for_each_entry_safe(desc, _d, &list, node)
+               ep93xx_dma_desc_put(edmac, desc);
+
+       return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+                                  struct dma_slave_config *config)
+{
+       enum dma_slave_buswidth width;
+       unsigned long flags;
+       u32 addr, ctrl;
+
+       if (!edmac->edma->m2m)
+               return -EINVAL;
+
+       switch (config->direction) {
+       case DMA_FROM_DEVICE:
+               width = config->src_addr_width;
+               addr = config->src_addr;
+               break;
+
+       case DMA_TO_DEVICE:
+               width = config->dst_addr_width;
+               addr = config->dst_addr;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               ctrl = 0;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               ctrl = M2M_CONTROL_PW_16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               ctrl = M2M_CONTROL_PW_32;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       edmac->runtime_addr = addr;
+       edmac->runtime_ctrl = ctrl;
+       spin_unlock_irqrestore(&edmac->lock, flags);
+
+       return 0;
+}
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Function returns %0 in case of success or a negative
+ * error code in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                             unsigned long arg)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       struct dma_slave_config *config;
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               return ep93xx_dma_terminate_all(edmac);
+
+       case DMA_SLAVE_CONFIG:
+               config = (struct dma_slave_config *)arg;
+               return ep93xx_dma_slave_config(edmac, config);
+
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query the state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+                                           dma_cookie_t cookie,
+                                           struct dma_tx_state *state)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+       dma_cookie_t last_used, last_completed;
+       enum dma_status ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&edmac->lock, flags);
+       last_used = chan->cookie;
+       last_completed = edmac->last_completed;
+       spin_unlock_irqrestore(&edmac->lock, flags);
+
+       ret = dma_async_is_complete(cookie, last_completed, last_used);
+       dma_set_tx_state(state, last_completed, last_used, 0);
+
+       return ret;
+}
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+       ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+       struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct ep93xx_dma_engine *edma;
+       struct dma_device *dma_dev;
+       size_t edma_size;
+       int ret, i;
+
+       edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+       edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+       if (!edma)
+               return -ENOMEM;
+
+       dma_dev = &edma->dma_dev;
+       edma->m2m = platform_get_device_id(pdev)->driver_data;
+       edma->num_channels = pdata->num_channels;
+
+       INIT_LIST_HEAD(&dma_dev->channels);
+       for (i = 0; i < pdata->num_channels; i++) {
+               const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+               struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+               edmac->chan.device = dma_dev;
+               edmac->regs = cdata->base;
+               edmac->irq = cdata->irq;
+               edmac->edma = edma;
+
+               edmac->clk = clk_get(NULL, cdata->name);
+               if (IS_ERR(edmac->clk)) {
+                       dev_warn(&pdev->dev, "failed to get clock for %s\n",
+                                cdata->name);
+                       continue;
+               }
+
+               spin_lock_init(&edmac->lock);
+               INIT_LIST_HEAD(&edmac->active);
+               INIT_LIST_HEAD(&edmac->queue);
+               INIT_LIST_HEAD(&edmac->free_list);
+               tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+                            (unsigned long)edmac);
+
+               list_add_tail(&edmac->chan.device_node,
+                             &dma_dev->channels);
+       }
+
+       dma_cap_zero(dma_dev->cap_mask);
+       dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+       dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+       dma_dev->dev = &pdev->dev;
+       dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+       dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+       dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+       dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+       dma_dev->device_control = ep93xx_dma_control;
+       dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+       dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+       dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
+       if (edma->m2m) {
+               dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+               dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+               edma->hw_setup = m2m_hw_setup;
+               edma->hw_shutdown = m2m_hw_shutdown;
+               edma->hw_submit = m2m_hw_submit;
+               edma->hw_interrupt = m2m_hw_interrupt;
+       } else {
+               dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+               edma->hw_setup = m2p_hw_setup;
+               edma->hw_shutdown = m2p_hw_shutdown;
+               edma->hw_submit = m2p_hw_submit;
+               edma->hw_interrupt = m2p_hw_interrupt;
+       }
+
+       ret = dma_async_device_register(dma_dev);
+       if (unlikely(ret)) {
+               for (i = 0; i < edma->num_channels; i++) {
+                       struct ep93xx_dma_chan *edmac = &edma->channels[i];
+                       if (!IS_ERR_OR_NULL(edmac->clk))
+                               clk_put(edmac->clk);
+               }
+               kfree(edma);
+       } else {
+               dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+                        edma->m2m ? "M" : "P");
+       }
+
+       return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+       { "ep93xx-dma-m2p", 0 },
+       { "ep93xx-dma-m2m", 1 },
+       { },
+};
+
+static struct platform_driver ep93xx_dma_driver = {
+       .driver         = {
+               .name   = "ep93xx-dma",
+       },
+       .id_table       = ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+       return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");
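
For reference, a minimal sketch of how a peripheral driver could claim one of
the channels exported above through the generic dmaengine API. The filter
callback, the channel name "ep93xx-spi-rx" and the chosen port/direction are
illustrative assumptions, not part of this patch; the pattern mirrors what the
spi-ep93xx changes further down do.

	#include <linux/dmaengine.h>
	#include <mach/dma.h>

	/* Bind our ep93xx_dma_data to whatever channel the core offers.
	 * A real filter would also reject channels that cannot serve this port. */
	static bool ep93xx_example_dma_filter(struct dma_chan *chan, void *param)
	{
		chan->private = param;
		return true;
	}

	static struct dma_chan *
	ep93xx_example_claim_rx_chan(struct ep93xx_dma_data *data)
	{
		dma_cap_mask_t mask;

		/* Hypothetical values; checked in ep93xx_dma_alloc_chan_resources() */
		data->port = EP93XX_DMA_SSP;
		data->direction = DMA_FROM_DEVICE;
		data->name = "ep93xx-spi-rx";

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, ep93xx_example_dma_filter, data);
	}

Once a channel is held, dmaengine_slave_config(), the device_prep_slave_sg()
or device_prep_dma_cyclic() callbacks and dma_async_issue_pending() drive the
transfers, exactly as implemented by the functions above.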
index de35c3a..c013481 100644 (file)
@@ -86,9 +86,6 @@ config SPI_BFIN_SPORT
        help
          Enable support for a SPI bus via the Blackfin SPORT peripheral.
 
-         This driver can also be built as a module.  If so, the module
-         will be called spi_bfin_sport.
-
 config SPI_AU1550
        tristate "Au1550/Au12x0 SPI Controller"
        depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
@@ -97,9 +94,6 @@ config SPI_AU1550
          If you say yes to this option, support will be included for the
          Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
 
-         This driver can also be built as a module.  If so, the module
-         will be called au1550_spi.
-
 config SPI_BITBANG
        tristate "Utilities for Bitbanging SPI masters"
        help
@@ -130,9 +124,6 @@ config SPI_COLDFIRE_QSPI
          This enables support for the Coldfire QSPI controller in master
          mode.
 
-         This driver can also be built as a module.  If so, the module
-         will be called coldfire_qspi.
-
 config SPI_DAVINCI
        tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
        depends on SPI_MASTER && ARCH_DAVINCI
@@ -140,9 +131,6 @@ config SPI_DAVINCI
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
-         This driver can also be built as a module. The module will be called
-         davinci_spi.
-
 config SPI_EP93XX
        tristate "Cirrus Logic EP93xx SPI controller"
        depends on ARCH_EP93XX
@@ -150,9 +138,6 @@ config SPI_EP93XX
          This enables using the Cirrus EP93xx SPI controller in master
          mode.
 
-         To compile this driver as a module, choose M here. The module will be
-         called ep93xx_spi.
-
 config SPI_GPIO
        tristate "GPIO-based bitbanging SPI Master"
        depends on GENERIC_GPIO
@@ -385,16 +370,16 @@ config SPI_TI_SSP
          This selects an SPI master implementation using a TI sequencer
          serial port.
 
-         To compile this driver as a module, choose M here: the
-         module will be called ti-ssp-spi.
-
 config SPI_TOPCLIFF_PCH
-       tristate "Topcliff PCH SPI Controller"
+       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
        depends on PCI
        help
          SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
          used in some x86 embedded processors.
 
+         This driver also supports the ML7213, a companion chip for the
+         Atom E6xx series and compatible with the Intel EG20T PCH.
+
 config SPI_TXX9
        tristate "Toshiba TXx9 SPI controller"
        depends on GENERIC_GPIO && CPU_TX49XX
index 0f8c69b..b60b04b 100644 (file)
@@ -7,68 +7,56 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
 # small core, mostly translating board-specific
 # config declarations into driver model code
 obj-$(CONFIG_SPI_MASTER)               += spi.o
+obj-$(CONFIG_SPI_SPIDEV)               += spidev.o
 
 # SPI master controller drivers (bus)
-obj-$(CONFIG_SPI_ALTERA)               += spi_altera.o
-obj-$(CONFIG_SPI_ATMEL)                        += atmel_spi.o
-obj-$(CONFIG_SPI_ATH79)                        += ath79_spi.o
-obj-$(CONFIG_SPI_BFIN)                 += spi_bfin5xx.o
-obj-$(CONFIG_SPI_BFIN_SPORT)           += spi_bfin_sport.o
-obj-$(CONFIG_SPI_BITBANG)              += spi_bitbang.o
-obj-$(CONFIG_SPI_AU1550)               += au1550_spi.o
-obj-$(CONFIG_SPI_BUTTERFLY)            += spi_butterfly.o
-obj-$(CONFIG_SPI_COLDFIRE_QSPI)                += coldfire_qspi.o
-obj-$(CONFIG_SPI_DAVINCI)              += davinci_spi.o
-obj-$(CONFIG_SPI_DESIGNWARE)           += dw_spi.o
-obj-$(CONFIG_SPI_DW_PCI)               += dw_spi_midpci.o
-dw_spi_midpci-objs                     := dw_spi_pci.o dw_spi_mid.o
-obj-$(CONFIG_SPI_DW_MMIO)              += dw_spi_mmio.o
-obj-$(CONFIG_SPI_EP93XX)               += ep93xx_spi.o
-obj-$(CONFIG_SPI_GPIO)                 += spi_gpio.o
-obj-$(CONFIG_SPI_IMX)                  += spi_imx.o
-obj-$(CONFIG_SPI_LM70_LLP)             += spi_lm70llp.o
-obj-$(CONFIG_SPI_PXA2XX)               += pxa2xx_spi.o
-obj-$(CONFIG_SPI_PXA2XX_PCI)           += pxa2xx_spi_pci.o
-obj-$(CONFIG_SPI_OC_TINY)              += spi_oc_tiny.o
-obj-$(CONFIG_SPI_OMAP_UWIRE)           += omap_uwire.o
-obj-$(CONFIG_SPI_OMAP24XX)             += omap2_mcspi.o
-obj-$(CONFIG_SPI_OMAP_100K)            += omap_spi_100k.o
-obj-$(CONFIG_SPI_ORION)                        += orion_spi.o
-obj-$(CONFIG_SPI_PL022)                        += amba-pl022.o
-obj-$(CONFIG_SPI_MPC512x_PSC)          += mpc512x_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx_PSC)          += mpc52xx_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx)              += mpc52xx_spi.o
-obj-$(CONFIG_SPI_FSL_LIB)              += spi_fsl_lib.o
-obj-$(CONFIG_SPI_FSL_ESPI)             += spi_fsl_espi.o
-obj-$(CONFIG_SPI_FSL_SPI)              += spi_fsl_spi.o
-obj-$(CONFIG_SPI_PPC4xx)               += spi_ppc4xx.o
-obj-$(CONFIG_SPI_S3C24XX_GPIO)         += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX)              += spi_s3c24xx_hw.o
-obj-$(CONFIG_SPI_S3C64XX)              += spi_s3c64xx.o
-obj-$(CONFIG_SPI_TEGRA)                        += spi_tegra.o
-obj-$(CONFIG_SPI_TI_SSP)               += ti-ssp-spi.o
-obj-$(CONFIG_SPI_TOPCLIFF_PCH)         += spi_topcliff_pch.o
-obj-$(CONFIG_SPI_TXX9)                 += spi_txx9.o
-obj-$(CONFIG_SPI_XILINX)               += xilinx_spi.o
-obj-$(CONFIG_SPI_SH)                   += spi_sh.o
-obj-$(CONFIG_SPI_SH_SCI)               += spi_sh_sci.o
-obj-$(CONFIG_SPI_SH_MSIOF)             += spi_sh_msiof.o
-obj-$(CONFIG_SPI_STMP3XXX)             += spi_stmp.o
-obj-$(CONFIG_SPI_NUC900)               += spi_nuc900.o
+obj-$(CONFIG_SPI_ALTERA)               += spi-altera.o
+obj-$(CONFIG_SPI_ATMEL)                        += spi-atmel.o
+obj-$(CONFIG_SPI_ATH79)                        += spi-ath79.o
+obj-$(CONFIG_SPI_AU1550)               += spi-au1550.o
+obj-$(CONFIG_SPI_BFIN)                 += spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT)           += spi-bfin-sport.o
+obj-$(CONFIG_SPI_BITBANG)              += spi-bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY)            += spi-butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI)                += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_DAVINCI)              += spi-davinci.o
+obj-$(CONFIG_SPI_DESIGNWARE)           += spi-dw.o
+obj-$(CONFIG_SPI_DW_MMIO)              += spi-dw-mmio.o
+obj-$(CONFIG_SPI_DW_PCI)               += spi-dw-midpci.o
+spi-dw-midpci-objs                     := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EP93XX)               += spi-ep93xx.o
+obj-$(CONFIG_SPI_FSL_LIB)              += spi-fsl-lib.o
+obj-$(CONFIG_SPI_FSL_ESPI)             += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_SPI)              += spi-fsl-spi.o
+obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
+obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
+obj-$(CONFIG_SPI_LM70_LLP)             += spi-lm70llp.o
+obj-$(CONFIG_SPI_MPC512x_PSC)          += spi-mpc512x-psc.o
+obj-$(CONFIG_SPI_MPC52xx_PSC)          += spi-mpc52xx-psc.o
+obj-$(CONFIG_SPI_MPC52xx)              += spi-mpc52xx.o
+obj-$(CONFIG_SPI_NUC900)               += spi-nuc900.o
+obj-$(CONFIG_SPI_OC_TINY)              += spi-oc-tiny.o
+obj-$(CONFIG_SPI_OMAP_UWIRE)           += spi-omap-uwire.o
+obj-$(CONFIG_SPI_OMAP_100K)            += spi-omap-100k.o
+obj-$(CONFIG_SPI_OMAP24XX)             += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_ORION)                        += spi-orion.o
+obj-$(CONFIG_SPI_PL022)                        += spi-pl022.o
+obj-$(CONFIG_SPI_PPC4xx)               += spi-ppc4xx.o
+obj-$(CONFIG_SPI_PXA2XX)               += spi-pxa2xx.o
+obj-$(CONFIG_SPI_PXA2XX_PCI)           += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_S3C24XX_GPIO)         += spi-s3c24xx-gpio.o
+obj-$(CONFIG_SPI_S3C24XX)              += spi-s3c24xx-hw.o
+spi-s3c24xx-hw-y                       := spi-s3c24xx.o
+spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
+obj-$(CONFIG_SPI_S3C64XX)              += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SH)                   += spi-sh.o
+obj-$(CONFIG_SPI_SH_MSIOF)             += spi-sh-msiof.o
+obj-$(CONFIG_SPI_SH_SCI)               += spi-sh-sci.o
+obj-$(CONFIG_SPI_STMP3XXX)             += spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA)                        += spi-tegra.o
+obj-$(CONFIG_SPI_TI_SSP)               += spi-ti-ssp.o
+obj-$(CONFIG_SPI_TLE62X0)              += spi-tle62x0.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH)         += spi-topcliff-pch.o
+obj-$(CONFIG_SPI_TXX9)                 += spi-txx9.o
+obj-$(CONFIG_SPI_XILINX)               += spi-xilinx.o
 
-# special build for s3c24xx spi driver with fiq support
-spi_s3c24xx_hw-y                       := spi_s3c24xx.o
-spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
-
-#      ... add above this line ...
-
-# SPI protocol drivers (device/link on bus)
-obj-$(CONFIG_SPI_SPIDEV)       += spidev.o
-obj-$(CONFIG_SPI_TLE62X0)      += tle62x0.o
-#      ... add above this line ...
-
-# SPI slave controller drivers (upstream link)
-#      ... add above this line ...
-
-# SPI slave drivers (protocol for that link)
-#      ... add above this line ...
diff --git a/drivers/spi/atmel_spi.h b/drivers/spi/atmel_spi.h
deleted file mode 100644 (file)
index 6e06b6a..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Register definitions for Atmel Serial Peripheral Interface (SPI)
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ATMEL_SPI_H__
-#define __ATMEL_SPI_H__
-
-/* SPI register offsets */
-#define SPI_CR                                 0x0000
-#define SPI_MR                                 0x0004
-#define SPI_RDR                                        0x0008
-#define SPI_TDR                                        0x000c
-#define SPI_SR                                 0x0010
-#define SPI_IER                                        0x0014
-#define SPI_IDR                                        0x0018
-#define SPI_IMR                                        0x001c
-#define SPI_CSR0                               0x0030
-#define SPI_CSR1                               0x0034
-#define SPI_CSR2                               0x0038
-#define SPI_CSR3                               0x003c
-#define SPI_RPR                                        0x0100
-#define SPI_RCR                                        0x0104
-#define SPI_TPR                                        0x0108
-#define SPI_TCR                                        0x010c
-#define SPI_RNPR                               0x0110
-#define SPI_RNCR                               0x0114
-#define SPI_TNPR                               0x0118
-#define SPI_TNCR                               0x011c
-#define SPI_PTCR                               0x0120
-#define SPI_PTSR                               0x0124
-
-/* Bitfields in CR */
-#define SPI_SPIEN_OFFSET                       0
-#define SPI_SPIEN_SIZE                         1
-#define SPI_SPIDIS_OFFSET                      1
-#define SPI_SPIDIS_SIZE                                1
-#define SPI_SWRST_OFFSET                       7
-#define SPI_SWRST_SIZE                         1
-#define SPI_LASTXFER_OFFSET                    24
-#define SPI_LASTXFER_SIZE                      1
-
-/* Bitfields in MR */
-#define SPI_MSTR_OFFSET                                0
-#define SPI_MSTR_SIZE                          1
-#define SPI_PS_OFFSET                          1
-#define SPI_PS_SIZE                            1
-#define SPI_PCSDEC_OFFSET                      2
-#define SPI_PCSDEC_SIZE                                1
-#define SPI_FDIV_OFFSET                                3
-#define SPI_FDIV_SIZE                          1
-#define SPI_MODFDIS_OFFSET                     4
-#define SPI_MODFDIS_SIZE                       1
-#define SPI_LLB_OFFSET                         7
-#define SPI_LLB_SIZE                           1
-#define SPI_PCS_OFFSET                         16
-#define SPI_PCS_SIZE                           4
-#define SPI_DLYBCS_OFFSET                      24
-#define SPI_DLYBCS_SIZE                                8
-
-/* Bitfields in RDR */
-#define SPI_RD_OFFSET                          0
-#define SPI_RD_SIZE                            16
-
-/* Bitfields in TDR */
-#define SPI_TD_OFFSET                          0
-#define SPI_TD_SIZE                            16
-
-/* Bitfields in SR */
-#define SPI_RDRF_OFFSET                                0
-#define SPI_RDRF_SIZE                          1
-#define SPI_TDRE_OFFSET                                1
-#define SPI_TDRE_SIZE                          1
-#define SPI_MODF_OFFSET                                2
-#define SPI_MODF_SIZE                          1
-#define SPI_OVRES_OFFSET                       3
-#define SPI_OVRES_SIZE                         1
-#define SPI_ENDRX_OFFSET                       4
-#define SPI_ENDRX_SIZE                         1
-#define SPI_ENDTX_OFFSET                       5
-#define SPI_ENDTX_SIZE                         1
-#define SPI_RXBUFF_OFFSET                      6
-#define SPI_RXBUFF_SIZE                                1
-#define SPI_TXBUFE_OFFSET                      7
-#define SPI_TXBUFE_SIZE                                1
-#define SPI_NSSR_OFFSET                                8
-#define SPI_NSSR_SIZE                          1
-#define SPI_TXEMPTY_OFFSET                     9
-#define SPI_TXEMPTY_SIZE                       1
-#define SPI_SPIENS_OFFSET                      16
-#define SPI_SPIENS_SIZE                                1
-
-/* Bitfields in CSR0 */
-#define SPI_CPOL_OFFSET                                0
-#define SPI_CPOL_SIZE                          1
-#define SPI_NCPHA_OFFSET                       1
-#define SPI_NCPHA_SIZE                         1
-#define SPI_CSAAT_OFFSET                       3
-#define SPI_CSAAT_SIZE                         1
-#define SPI_BITS_OFFSET                                4
-#define SPI_BITS_SIZE                          4
-#define SPI_SCBR_OFFSET                                8
-#define SPI_SCBR_SIZE                          8
-#define SPI_DLYBS_OFFSET                       16
-#define SPI_DLYBS_SIZE                         8
-#define SPI_DLYBCT_OFFSET                      24
-#define SPI_DLYBCT_SIZE                                8
-
-/* Bitfields in RCR */
-#define SPI_RXCTR_OFFSET                       0
-#define SPI_RXCTR_SIZE                         16
-
-/* Bitfields in TCR */
-#define SPI_TXCTR_OFFSET                       0
-#define SPI_TXCTR_SIZE                         16
-
-/* Bitfields in RNCR */
-#define SPI_RXNCR_OFFSET                       0
-#define SPI_RXNCR_SIZE                         16
-
-/* Bitfields in TNCR */
-#define SPI_TXNCR_OFFSET                       0
-#define SPI_TXNCR_SIZE                         16
-
-/* Bitfields in PTCR */
-#define SPI_RXTEN_OFFSET                       0
-#define SPI_RXTEN_SIZE                         1
-#define SPI_RXTDIS_OFFSET                      1
-#define SPI_RXTDIS_SIZE                                1
-#define SPI_TXTEN_OFFSET                       8
-#define SPI_TXTEN_SIZE                         1
-#define SPI_TXTDIS_OFFSET                      9
-#define SPI_TXTDIS_SIZE                                1
-
-/* Constants for BITS */
-#define SPI_BITS_8_BPT                         0
-#define SPI_BITS_9_BPT                         1
-#define SPI_BITS_10_BPT                                2
-#define SPI_BITS_11_BPT                                3
-#define SPI_BITS_12_BPT                                4
-#define SPI_BITS_13_BPT                                5
-#define SPI_BITS_14_BPT                                6
-#define SPI_BITS_15_BPT                                7
-#define SPI_BITS_16_BPT                                8
-
-/* Bit manipulation macros */
-#define SPI_BIT(name) \
-       (1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
-       (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
-       (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
-       ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
-         | SPI_BF(name,value))
-
-/* Register access macros */
-#define spi_readl(port,reg) \
-       __raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
-       __raw_writel((value), (port)->regs + SPI_##reg)
-
-#endif /* __ATMEL_SPI_H__ */
similarity index 99%
rename from drivers/spi/ath79_spi.c
rename to drivers/spi/spi-ath79.c
index fcff810..03019bf 100644 (file)
@@ -232,7 +232,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
                goto err_put_master;
        }
 
-       sp->base = ioremap(r->start, r->end - r->start + 1);
+       sp->base = ioremap(r->start, resource_size(r));
        if (!sp->base) {
                ret = -ENXIO;
                goto err_put_master;
similarity index 85%
rename from drivers/spi/atmel_spi.c
rename to drivers/spi/spi-atmel.c
index 08711e9..82dee9a 100644 (file)
 #include <mach/gpio.h>
 #include <mach/cpu.h>
 
-#include "atmel_spi.h"
+/* SPI register offsets */
+#define SPI_CR                                 0x0000
+#define SPI_MR                                 0x0004
+#define SPI_RDR                                        0x0008
+#define SPI_TDR                                        0x000c
+#define SPI_SR                                 0x0010
+#define SPI_IER                                        0x0014
+#define SPI_IDR                                        0x0018
+#define SPI_IMR                                        0x001c
+#define SPI_CSR0                               0x0030
+#define SPI_CSR1                               0x0034
+#define SPI_CSR2                               0x0038
+#define SPI_CSR3                               0x003c
+#define SPI_RPR                                        0x0100
+#define SPI_RCR                                        0x0104
+#define SPI_TPR                                        0x0108
+#define SPI_TCR                                        0x010c
+#define SPI_RNPR                               0x0110
+#define SPI_RNCR                               0x0114
+#define SPI_TNPR                               0x0118
+#define SPI_TNCR                               0x011c
+#define SPI_PTCR                               0x0120
+#define SPI_PTSR                               0x0124
+
+/* Bitfields in CR */
+#define SPI_SPIEN_OFFSET                       0
+#define SPI_SPIEN_SIZE                         1
+#define SPI_SPIDIS_OFFSET                      1
+#define SPI_SPIDIS_SIZE                                1
+#define SPI_SWRST_OFFSET                       7
+#define SPI_SWRST_SIZE                         1
+#define SPI_LASTXFER_OFFSET                    24
+#define SPI_LASTXFER_SIZE                      1
+
+/* Bitfields in MR */
+#define SPI_MSTR_OFFSET                                0
+#define SPI_MSTR_SIZE                          1
+#define SPI_PS_OFFSET                          1
+#define SPI_PS_SIZE                            1
+#define SPI_PCSDEC_OFFSET                      2
+#define SPI_PCSDEC_SIZE                                1
+#define SPI_FDIV_OFFSET                                3
+#define SPI_FDIV_SIZE                          1
+#define SPI_MODFDIS_OFFSET                     4
+#define SPI_MODFDIS_SIZE                       1
+#define SPI_LLB_OFFSET                         7
+#define SPI_LLB_SIZE                           1
+#define SPI_PCS_OFFSET                         16
+#define SPI_PCS_SIZE                           4
+#define SPI_DLYBCS_OFFSET                      24
+#define SPI_DLYBCS_SIZE                                8
+
+/* Bitfields in RDR */
+#define SPI_RD_OFFSET                          0
+#define SPI_RD_SIZE                            16
+
+/* Bitfields in TDR */
+#define SPI_TD_OFFSET                          0
+#define SPI_TD_SIZE                            16
+
+/* Bitfields in SR */
+#define SPI_RDRF_OFFSET                                0
+#define SPI_RDRF_SIZE                          1
+#define SPI_TDRE_OFFSET                                1
+#define SPI_TDRE_SIZE                          1
+#define SPI_MODF_OFFSET                                2
+#define SPI_MODF_SIZE                          1
+#define SPI_OVRES_OFFSET                       3
+#define SPI_OVRES_SIZE                         1
+#define SPI_ENDRX_OFFSET                       4
+#define SPI_ENDRX_SIZE                         1
+#define SPI_ENDTX_OFFSET                       5
+#define SPI_ENDTX_SIZE                         1
+#define SPI_RXBUFF_OFFSET                      6
+#define SPI_RXBUFF_SIZE                                1
+#define SPI_TXBUFE_OFFSET                      7
+#define SPI_TXBUFE_SIZE                                1
+#define SPI_NSSR_OFFSET                                8
+#define SPI_NSSR_SIZE                          1
+#define SPI_TXEMPTY_OFFSET                     9
+#define SPI_TXEMPTY_SIZE                       1
+#define SPI_SPIENS_OFFSET                      16
+#define SPI_SPIENS_SIZE                                1
+
+/* Bitfields in CSR0 */
+#define SPI_CPOL_OFFSET                                0
+#define SPI_CPOL_SIZE                          1
+#define SPI_NCPHA_OFFSET                       1
+#define SPI_NCPHA_SIZE                         1
+#define SPI_CSAAT_OFFSET                       3
+#define SPI_CSAAT_SIZE                         1
+#define SPI_BITS_OFFSET                                4
+#define SPI_BITS_SIZE                          4
+#define SPI_SCBR_OFFSET                                8
+#define SPI_SCBR_SIZE                          8
+#define SPI_DLYBS_OFFSET                       16
+#define SPI_DLYBS_SIZE                         8
+#define SPI_DLYBCT_OFFSET                      24
+#define SPI_DLYBCT_SIZE                                8
+
+/* Bitfields in RCR */
+#define SPI_RXCTR_OFFSET                       0
+#define SPI_RXCTR_SIZE                         16
+
+/* Bitfields in TCR */
+#define SPI_TXCTR_OFFSET                       0
+#define SPI_TXCTR_SIZE                         16
+
+/* Bitfields in RNCR */
+#define SPI_RXNCR_OFFSET                       0
+#define SPI_RXNCR_SIZE                         16
+
+/* Bitfields in TNCR */
+#define SPI_TXNCR_OFFSET                       0
+#define SPI_TXNCR_SIZE                         16
+
+/* Bitfields in PTCR */
+#define SPI_RXTEN_OFFSET                       0
+#define SPI_RXTEN_SIZE                         1
+#define SPI_RXTDIS_OFFSET                      1
+#define SPI_RXTDIS_SIZE                                1
+#define SPI_TXTEN_OFFSET                       8
+#define SPI_TXTEN_SIZE                         1
+#define SPI_TXTDIS_OFFSET                      9
+#define SPI_TXTDIS_SIZE                                1
+
+/* Constants for BITS */
+#define SPI_BITS_8_BPT                         0
+#define SPI_BITS_9_BPT                         1
+#define SPI_BITS_10_BPT                                2
+#define SPI_BITS_11_BPT                                3
+#define SPI_BITS_12_BPT                                4
+#define SPI_BITS_13_BPT                                5
+#define SPI_BITS_14_BPT                                6
+#define SPI_BITS_15_BPT                                7
+#define SPI_BITS_16_BPT                                8
+
+/* Bit manipulation macros */
+#define SPI_BIT(name) \
+       (1 << SPI_##name##_OFFSET)
+#define SPI_BF(name,value) \
+       (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
+#define SPI_BFEXT(name,value) \
+       (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
+#define SPI_BFINS(name,value,old) \
+       ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+         | SPI_BF(name,value))
+
+/* Register access macros */
+#define spi_readl(port,reg) \
+       __raw_readl((port)->regs + SPI_##reg)
+#define spi_writel(port,reg,value) \
+       __raw_writel((value), (port)->regs + SPI_##reg)
+
 
 /*
  * The core SPI transfer engine just talks to a register bank to set up
similarity index 99%
rename from drivers/spi/au1550_spi.c
rename to drivers/spi/spi-au1550.c
index b50563d..bddee5f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * au1550_spi.c - au1550 psc spi controller driver
+ * au1550 psc spi controller driver
  * may work also with au1200, au1210, au1250
  * will not work on au1000, au1100 and au1500 (no full spi controller there)
  *
similarity index 98%
rename from drivers/spi/spi_bitbang.c
rename to drivers/spi/spi-bitbang.c
index 14a63f6..02d57fb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities
+ * polling/bitbanging SPI master controller driver utilities
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ static unsigned bitbang_txrx_8(
        unsigned                ns,
        struct spi_transfer     *t
 ) {
-       unsigned                bits = spi->bits_per_word;
+       unsigned                bits = t->bits_per_word ? : spi->bits_per_word;
        unsigned                count = t->len;
        const u8                *tx = t->tx_buf;
        u8                      *rx = t->rx_buf;
@@ -94,7 +94,7 @@ static unsigned bitbang_txrx_16(
        unsigned                ns,
        struct spi_transfer     *t
 ) {
-       unsigned                bits = spi->bits_per_word;
+       unsigned                bits = t->bits_per_word ? : spi->bits_per_word;
        unsigned                count = t->len;
        const u16               *tx = t->tx_buf;
        u16                     *rx = t->rx_buf;
@@ -120,7 +120,7 @@ static unsigned bitbang_txrx_32(
        unsigned                ns,
        struct spi_transfer     *t
 ) {
-       unsigned                bits = spi->bits_per_word;
+       unsigned                bits = t->bits_per_word ? : spi->bits_per_word;
        unsigned                count = t->len;
        const u32               *tx = t->tx_buf;
        u32                     *rx = t->rx_buf;
similarity index 99%
rename from drivers/spi/spi_butterfly.c
rename to drivers/spi/spi-butterfly.c
index 0d4ceba..9f907ec 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * spi_butterfly.c - parport-to-butterfly adapter
+ * parport-to-butterfly adapter
  *
  * Copyright (C) 2005 David Brownell
  *
@@ -149,7 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
 #define        spidelay(X)     do{}while(0)
 //#define      spidelay        ndelay
 
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
 
 static u32
 butterfly_txrx_word_mode0(struct spi_device *spi,
similarity index 98%
rename from drivers/spi/dw_spi_mid.c
rename to drivers/spi/spi-dw-mid.c
index 4891782..130e555 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * dw_spi_mid.c - special handling for DW core on Intel MID platform
+ * Special handling for DW core on Intel MID platform
  *
  * Copyright (c) 2009, Intel Corporation.
  *
@@ -23,7 +23,7 @@
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
 
-#include "dw_spi.h"
+#include "spi-dw.h"
 
 #ifdef CONFIG_SPI_DW_MID_DMA
 #include <linux/intel_mid_dma.h>
similarity index 97%
rename from drivers/spi/dw_spi_mmio.c
rename to drivers/spi/spi-dw-mmio.c
index e0e813d..34eb665 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
+ * Memory-mapped interface driver for DW SPI Core
  *
  * Copyright (c) 2010, Octasic semiconductor.
  *
@@ -16,7 +16,7 @@
 #include <linux/spi/spi.h>
 #include <linux/scatterlist.h>
 
-#include "dw_spi.h"
+#include "spi-dw.h"
 
 #define DRIVER_NAME "dw_spi_mmio"
 
similarity index 98%
rename from drivers/spi/dw_spi_pci.c
rename to drivers/spi/spi-dw-pci.c
index ad260aa..c5f37f0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ * PCI interface driver for DW SPI Core
  *
  * Copyright (c) 2009, Intel Corporation.
  *
@@ -22,7 +22,7 @@
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
 
-#include "dw_spi.h"
+#include "spi-dw.h"
 
 #define DRIVER_NAME "dw_spi_pci"
 
similarity index 99%
rename from drivers/spi/dw_spi.c
rename to drivers/spi/spi-dw.c
index 919fa9d..ece5f69 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
+ * Designware SPI core controller driver (refer pxa2xx_spi.c)
  *
  * Copyright (c) 2009, Intel Corporation.
  *
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
 
-#include "dw_spi.h"
+#include "spi-dw.h"
 
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
similarity index 100%
rename from drivers/spi/dw_spi.h
rename to drivers/spi/spi-dw.h
similarity index 77%
rename from drivers/spi/ep93xx_spi.c
rename to drivers/spi/spi-ep93xx.c
index d357007..1cf6454 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Driver for Cirrus Logic EP93xx SPI controller.
  *
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
  *
  * Explicit FIFO handling code was inspired by amba-pl022 driver.
  *
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dmaengine.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/scatterlist.h>
 #include <linux/spi/spi.h>
 
+#include <mach/dma.h>
 #include <mach/ep93xx_spi.h>
 
 #define SSPCR0                 0x0000
@@ -71,6 +74,7 @@
  * @pdev: pointer to platform device
  * @clk: clock for the controller
  * @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
  * @irq: IRQ number used by the driver
  * @min_rate: minimum clock rate (in Hz) supported by the controller
  * @max_rate: maximum clock rate (in Hz) supported by the controller
  * @rx: current byte in transfer to receive
  * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
  *              frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ *            the client
  *
  * This structure holds EP93xx SPI controller specific information. When
  * @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
        const struct platform_device    *pdev;
        struct clk                      *clk;
        void __iomem                    *regs_base;
+       unsigned long                   sspdr_phys;
        int                             irq;
        unsigned long                   min_rate;
        unsigned long                   max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
        size_t                          tx;
        size_t                          rx;
        size_t                          fifo_level;
+       struct dma_chan                 *dma_rx;
+       struct dma_chan                 *dma_tx;
+       struct ep93xx_dma_data          dma_rx_data;
+       struct ep93xx_dma_data          dma_tx_data;
+       struct sg_table                 rx_sgt;
+       struct sg_table                 tx_sgt;
+       void                            *zeropage;
 };
 
 /**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
                espi->fifo_level++;
        }
 
-       if (espi->rx == t->len) {
-               msg->actual_length += t->len;
+       if (espi->rx == t->len)
                return 0;
-       }
 
        return -EINPROGRESS;
 }
 
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+       /*
+        * Now everything is set up for the current transfer. We prime the TX
+        * FIFO, enable interrupts, and wait for the transfer to complete.
+        */
+       if (ep93xx_spi_read_write(espi)) {
+               ep93xx_spi_enable_interrupts(espi);
+               wait_for_completion(&espi->wait);
+       }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success or an
+ * ERR_PTR() in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+       struct spi_transfer *t = espi->current_msg->state;
+       struct dma_async_tx_descriptor *txd;
+       enum dma_slave_buswidth buswidth;
+       struct dma_slave_config conf;
+       struct scatterlist *sg;
+       struct sg_table *sgt;
+       struct dma_chan *chan;
+       const void *buf, *pbuf;
+       size_t len = t->len;
+       int i, ret, nents;
+
+       if (bits_per_word(espi) > 8)
+               buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       else
+               buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+       memset(&conf, 0, sizeof(conf));
+       conf.direction = dir;
+
+       if (dir == DMA_FROM_DEVICE) {
+               chan = espi->dma_rx;
+               buf = t->rx_buf;
+               sgt = &espi->rx_sgt;
+
+               conf.src_addr = espi->sspdr_phys;
+               conf.src_addr_width = buswidth;
+       } else {
+               chan = espi->dma_tx;
+               buf = t->tx_buf;
+               sgt = &espi->tx_sgt;
+
+               conf.dst_addr = espi->sspdr_phys;
+               conf.dst_addr_width = buswidth;
+       }
+
+       ret = dmaengine_slave_config(chan, &conf);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /*
+        * We need to split the transfer into PAGE_SIZE'd chunks. This is
+        * because we are using @espi->zeropage to provide a zero RX buffer
+        * for the TX transfers and we have only allocated one page for that.
+        *
+        * For performance reasons we allocate a new sg_table only when
+        * needed. Otherwise we will re-use the current one. Eventually the
+        * last sg_table is released in ep93xx_spi_release_dma().
+        */
+
+       nents = DIV_ROUND_UP(len, PAGE_SIZE);
+       if (nents != sgt->nents) {
+               sg_free_table(sgt);
+
+               ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+
+       pbuf = buf;
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+               if (buf) {
+                       sg_set_page(sg, virt_to_page(pbuf), bytes,
+                                   offset_in_page(pbuf));
+               } else {
+                       sg_set_page(sg, virt_to_page(espi->zeropage),
+                                   bytes, 0);
+               }
+
+               pbuf += bytes;
+               len -= bytes;
+       }
+
+       if (WARN_ON(len)) {
+               dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
+               return ERR_PTR(-EINVAL);
+       }
+
+       nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+       if (!nents)
+               return ERR_PTR(-ENOMEM);
+
+       txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+                                                dir, DMA_CTRL_ACK);
+       if (!txd) {
+               dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+               return ERR_PTR(-ENOMEM);
+       }
+       return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function finishes with the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+                                 enum dma_data_direction dir)
+{
+       struct dma_chan *chan;
+       struct sg_table *sgt;
+
+       if (dir == DMA_FROM_DEVICE) {
+               chan = espi->dma_rx;
+               sgt = &espi->rx_sgt;
+       } else {
+               chan = espi->dma_tx;
+               sgt = &espi->tx_sgt;
+       }
+
+       dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+       complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+       struct spi_message *msg = espi->current_msg;
+       struct dma_async_tx_descriptor *rxd, *txd;
+
+       rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+       if (IS_ERR(rxd)) {
+               dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+               msg->status = PTR_ERR(rxd);
+               return;
+       }
+
+       txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+       if (IS_ERR(txd)) {
+               ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+               dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+               msg->status = PTR_ERR(txd);
+               return;
+       }
+
+       /* We are ready when RX is done */
+       rxd->callback = ep93xx_spi_dma_callback;
+       rxd->callback_param = &espi->wait;
+
+       /* Now submit both descriptors and wait while they finish */
+       dmaengine_submit(rxd);
+       dmaengine_submit(txd);
+
+       dma_async_issue_pending(espi->dma_rx);
+       dma_async_issue_pending(espi->dma_tx);
+
+       wait_for_completion(&espi->wait);
+
+       ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+       ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
 /**
  * ep93xx_spi_process_transfer() - processes one SPI transfer
  * @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
        espi->tx = 0;
 
        /*
-        * Now everything is set up for the current transfer. We prime the TX
-        * FIFO, enable interrupts, and wait for the transfer to complete.
+        * There is no point in setting up DMA for transfers which will fit
+        * into the FIFO and can be completed with a single interrupt. In
+        * those cases we use PIO and don't bother with DMA.
         */
-       if (ep93xx_spi_read_write(espi)) {
-               ep93xx_spi_enable_interrupts(espi);
-               wait_for_completion(&espi->wait);
-       }
+       if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+               ep93xx_spi_dma_transfer(espi);
+       else
+               ep93xx_spi_pio_transfer(espi);
 
        /*
         * In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
        if (msg->status)
                return;
 
+       msg->actual_length += t->len;
+
        /*
         * After this transfer is finished, perform any possible
         * post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+       if (ep93xx_dma_chan_is_m2p(chan))
+               return false;
+
+       chan->private = filter_param;
+       return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+       dma_cap_mask_t mask;
+       int ret;
+
+       espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!espi->zeropage)
+               return -ENOMEM;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+
+       espi->dma_rx_data.port = EP93XX_DMA_SSP;
+       espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+       espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+       espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+                                          &espi->dma_rx_data);
+       if (!espi->dma_rx) {
+               ret = -ENODEV;
+               goto fail_free_page;
+       }
+
+       espi->dma_tx_data.port = EP93XX_DMA_SSP;
+       espi->dma_tx_data.direction = DMA_TO_DEVICE;
+       espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+       espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+                                          &espi->dma_tx_data);
+       if (!espi->dma_tx) {
+               ret = -ENODEV;
+               goto fail_release_rx;
+       }
+
+       return 0;
+
+fail_release_rx:
+       dma_release_channel(espi->dma_rx);
+       espi->dma_rx = NULL;
+fail_free_page:
+       free_page((unsigned long)espi->zeropage);
+
+       return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+       if (espi->dma_rx) {
+               dma_release_channel(espi->dma_rx);
+               sg_free_table(&espi->rx_sgt);
+       }
+       if (espi->dma_tx) {
+               dma_release_channel(espi->dma_tx);
+               sg_free_table(&espi->tx_sgt);
+       }
+
+       if (espi->zeropage)
+               free_page((unsigned long)espi->zeropage);
+}
+
 static int __init ep93xx_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
                goto fail_put_clock;
        }
 
+       espi->sspdr_phys = res->start + SSPDR;
        espi->regs_base = ioremap(res->start, resource_size(res));
        if (!espi->regs_base) {
                dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
                goto fail_unmap_regs;
        }
 
+       if (info->use_dma && ep93xx_spi_setup_dma(espi))
+               dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
        espi->wq = create_singlethread_workqueue("ep93xx_spid");
        if (!espi->wq) {
                dev_err(&pdev->dev, "unable to create workqueue\n");
-               goto fail_free_irq;
+               goto fail_free_dma;
        }
        INIT_WORK(&espi->msg_work, ep93xx_spi_work);
        INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
 
 fail_free_queue:
        destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+       ep93xx_spi_release_dma(espi);
        free_irq(espi->irq, espi);
 fail_unmap_regs:
        iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
        }
        spin_unlock_irq(&espi->lock);
 
+       ep93xx_spi_release_dma(espi);
        free_irq(espi->irq, espi);
        iounmap(espi->regs_base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
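As an aside, the ep93xx DMA path added above is a textbook use of the dmaengine slave API: request a channel with a filter function, configure it with dmaengine_slave_config(), prepare a slave scatter-gather descriptor, submit it, kick the channel and wait for the completion callback. The following minimal sketch of that flow is illustrative only and is not part of the patch; example_dma_done() and example_dma_read() are made-up names, and the sg_table is assumed to be already dma_map_sg()'d.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Completion callback, same in spirit as ep93xx_spi_dma_callback() above. */
static void example_dma_done(void *param)
{
	complete(param);
}

/*
 * Illustrative slave DMA read: @chan was obtained with dma_request_channel(),
 * @sgt is an already mapped sg_table, @dev_addr is the device data register.
 */
static int example_dma_read(struct dma_chan *chan, struct sg_table *sgt,
			    dma_addr_t dev_addr, struct completion *done)
{
	struct dma_slave_config conf = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	};
	struct dma_async_tx_descriptor *txd;
	int ret;

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ret;

	txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, sgt->nents,
						 DMA_FROM_DEVICE, DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	txd->callback = example_dma_done;
	txd->callback_param = done;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	wait_for_completion(done);
	return 0;
}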
similarity index 99%
rename from drivers/spi/spi_fsl_espi.c
rename to drivers/spi/spi-fsl-espi.c
index 496f895..54e499d 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <sysdev/fsl_soc.h>
 
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
 
 /* eSPI Controller registers */
 struct fsl_espi_reg {
similarity index 99%
rename from drivers/spi/spi_fsl_lib.c
rename to drivers/spi/spi-fsl-lib.c
index ff59f42..2674fad 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/of_spi.h>
 #include <sysdev/fsl_soc.h>
 
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
 
 #define MPC8XXX_SPI_RX_BUF(type)                                         \
 void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
similarity index 99%
rename from drivers/spi/spi_fsl_spi.c
rename to drivers/spi/spi-fsl-spi.c
index 7963c9b..e013117 100644 (file)
@@ -37,7 +37,7 @@
 #include <asm/cpm.h>
 #include <asm/qe.h>
 
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
 
 /* CPM1 and CPM2 are mutually exclusive. */
 #ifdef CONFIG_CPM1
similarity index 99%
rename from drivers/spi/spi_gpio.c
rename to drivers/spi/spi-gpio.c
index 63e51b0..0e88ab7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * spi_gpio.c - SPI master driver using generic bitbanged GPIO
+ * SPI master driver using generic bitbanged GPIO
  *
  * Copyright (C) 2006,2008 David Brownell
  *
@@ -69,7 +69,7 @@ struct spi_gpio {
  *             #define SPI_MOSI_GPIO   120
  *             #define SPI_SCK_GPIO    121
  *             #define SPI_N_CHIPSEL   4
- *             #include "spi_gpio.c"
+ *             #include "spi-gpio.c"
  */
 
 #ifndef DRIVER_NAME
@@ -127,7 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
  */
 #define spidelay(nsecs)        do {} while (0)
 
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
 
 /*
  * These functions can leverage inline expansion of GPIO calls to shrink
similarity index 100%
rename from drivers/spi/spi_imx.c
rename to drivers/spi/spi-imx.c
similarity index 98%
rename from drivers/spi/spi_lm70llp.c
rename to drivers/spi/spi-lm70llp.c
index 7746a41..933eb9d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor
+ * Driver for LM70EVAL-LLP board for the LM70 sensor
  *
  * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
  *
@@ -174,7 +174,7 @@ static inline int getmiso(struct spi_device *s)
 }
 /*--------------------------------------------------------------------*/
 
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
 
 static void lm70_chipselect(struct spi_device *spi, int value)
 {
similarity index 99%
rename from drivers/spi/spi_nuc900.c
rename to drivers/spi/spi-nuc900.c
index 3cd15f6..c0a6ce8 100644 (file)
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_nuc900.c
- *
+/*
  * Copyright (c) 2009 Nuvoton technology.
  * Wan ZongShun <mcuos.com@gmail.com>
  *
@@ -7,7 +6,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
-*/
+ */
 
 #include <linux/init.h>
 #include <linux/spinlock.h>
similarity index 99%
rename from drivers/spi/omap_uwire.c
rename to drivers/spi/spi-omap-uwire.c
index 160d326..00a8e9d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * omap_uwire.c -- MicroWire interface driver for OMAP
+ * MicroWire interface driver for OMAP
  *
  * Copyright 2003 MontaVista Software Inc. <source@mvista.com>
  *
similarity index 99%
rename from drivers/spi/omap2_mcspi.c
rename to drivers/spi/spi-omap2-mcspi.c
index 969cdd2..fde3a2d 100644 (file)
@@ -1116,8 +1116,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
                status = -ENODEV;
                goto err1;
        }
-       if (!request_mem_region(r->start, (r->end - r->start) + 1,
-                       dev_name(&pdev->dev))) {
+       if (!request_mem_region(r->start, resource_size(r),
+                               dev_name(&pdev->dev))) {
                status = -EBUSY;
                goto err1;
        }
@@ -1125,7 +1125,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
        r->start += pdata->regs_offset;
        r->end += pdata->regs_offset;
        mcspi->phys = r->start;
-       mcspi->base = ioremap(r->start, r->end - r->start + 1);
+       mcspi->base = ioremap(r->start, resource_size(r));
        if (!mcspi->base) {
                dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
                status = -ENOMEM;
@@ -1190,7 +1190,7 @@ err4:
 err3:
        kfree(mcspi->dma_channels);
 err2:
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
        iounmap(mcspi->base);
 err1:
        return status;
@@ -1210,7 +1210,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
 
        omap2_mcspi_disable_clocks(mcspi);
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
 
        base = mcspi->base;
        spi_unregister_master(master);
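The repeated (r->end - r->start) + 1 to resource_size(r) conversions in this and the following drivers are mechanical cleanups; resource_size() is the generic helper from <linux/ioport.h> and computes the same inclusive length, roughly:

/* Paraphrased from <linux/ioport.h>: length of a resource, both ends inclusive. */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}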
similarity index 98%
rename from drivers/spi/orion_spi.c
rename to drivers/spi/spi-orion.c
index 0b677dc..9421a39 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * orion_spi.c -- Marvell Orion SPI controller driver
+ * Marvell Orion SPI controller driver
  *
  * Author: Shadi Ammouri <shadi@marvell.com>
  * Copyright (C) 2007-2008 Marvell Ltd.
@@ -489,7 +489,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
                goto out;
        }
 
-       if (!request_mem_region(r->start, (r->end - r->start) + 1,
+       if (!request_mem_region(r->start, resource_size(r),
                                dev_name(&pdev->dev))) {
                status = -EBUSY;
                goto out;
@@ -511,7 +511,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
        return status;
 
 out_rel_mem:
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
 
 out:
        spi_master_put(master);
@@ -531,7 +531,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
        cancel_work_sync(&spi->work);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
 
        spi_unregister_master(master);
 
similarity index 96%
rename from drivers/spi/amba-pl022.c
rename to drivers/spi/spi-pl022.c
index d18ce9e..eba88c7 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * drivers/spi/amba-pl022.c
- *
  * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
  *
  * Copyright (C) 2008-2009 ST-Ericsson AB
@@ -42,6 +40,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
 
 /*
  * This macro is used to define some register default values.
@@ -383,6 +382,8 @@ struct pl022 {
        enum ssp_reading                read;
        enum ssp_writing                write;
        u32                             exp_fifo_level;
+       enum ssp_rx_level_trig          rx_lev_trig;
+       enum ssp_tx_level_trig          tx_lev_trig;
        /* DMA settings */
 #ifdef CONFIG_DMA_ENGINE
        struct dma_chan                 *dma_rx_channel;
@@ -517,6 +518,7 @@ static void giveback(struct pl022 *pl022)
        clk_disable(pl022->clk);
        amba_pclk_disable(pl022->adev);
        amba_vcore_disable(pl022->adev);
+       pm_runtime_put(&pl022->adev->dev);
 }
 
 /**
@@ -909,12 +911,10 @@ static int configure_dma(struct pl022 *pl022)
        struct dma_slave_config rx_conf = {
                .src_addr = SSP_DR(pl022->phybase),
                .direction = DMA_FROM_DEVICE,
-               .src_maxburst = pl022->vendor->fifodepth >> 1,
        };
        struct dma_slave_config tx_conf = {
                .dst_addr = SSP_DR(pl022->phybase),
                .direction = DMA_TO_DEVICE,
-               .dst_maxburst = pl022->vendor->fifodepth >> 1,
        };
        unsigned int pages;
        int ret;
@@ -928,6 +928,54 @@ static int configure_dma(struct pl022 *pl022)
        if (!rxchan || !txchan)
                return -ENODEV;
 
+       /*
+        * If supplied, the DMA burst size should equal the FIFO trigger level.
+        * Notice that the DMA engine uses one-to-one mapping. Since we cannot
+        * trigger on 2 elements this needs explicit mapping rather than
+        * calculation.
+        */
+       switch (pl022->rx_lev_trig) {
+       case SSP_RX_1_OR_MORE_ELEM:
+               rx_conf.src_maxburst = 1;
+               break;
+       case SSP_RX_4_OR_MORE_ELEM:
+               rx_conf.src_maxburst = 4;
+               break;
+       case SSP_RX_8_OR_MORE_ELEM:
+               rx_conf.src_maxburst = 8;
+               break;
+       case SSP_RX_16_OR_MORE_ELEM:
+               rx_conf.src_maxburst = 16;
+               break;
+       case SSP_RX_32_OR_MORE_ELEM:
+               rx_conf.src_maxburst = 32;
+               break;
+       default:
+               rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
+               break;
+       }
+
+       switch (pl022->tx_lev_trig) {
+       case SSP_TX_1_OR_MORE_EMPTY_LOC:
+               tx_conf.dst_maxburst = 1;
+               break;
+       case SSP_TX_4_OR_MORE_EMPTY_LOC:
+               tx_conf.dst_maxburst = 4;
+               break;
+       case SSP_TX_8_OR_MORE_EMPTY_LOC:
+               tx_conf.dst_maxburst = 8;
+               break;
+       case SSP_TX_16_OR_MORE_EMPTY_LOC:
+               tx_conf.dst_maxburst = 16;
+               break;
+       case SSP_TX_32_OR_MORE_EMPTY_LOC:
+               tx_conf.dst_maxburst = 32;
+               break;
+       default:
+               tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
+               break;
+       }
+
        switch (pl022->read) {
        case READING_NULL:
                /* Use the same as for writing */
@@ -1496,6 +1544,7 @@ static void pump_messages(struct work_struct *work)
         * and core will be disabled when giveback() is called in each method
         * (poll/interrupt/DMA)
         */
+       pm_runtime_get_sync(&pl022->adev->dev);
        amba_vcore_enable(pl022->adev);
        amba_pclk_enable(pl022->adev);
        clk_enable(pl022->clk);
@@ -1629,17 +1678,57 @@ static int verify_controller_parameters(struct pl022 *pl022,
                        "Communication mode is configured incorrectly\n");
                return -EINVAL;
        }
-       if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
-           || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
+       switch (chip_info->rx_lev_trig) {
+       case SSP_RX_1_OR_MORE_ELEM:
+       case SSP_RX_4_OR_MORE_ELEM:
+       case SSP_RX_8_OR_MORE_ELEM:
+               /* These are always OK, all variants can handle this */
+               break;
+       case SSP_RX_16_OR_MORE_ELEM:
+               if (pl022->vendor->fifodepth < 16) {
+                       dev_err(&pl022->adev->dev,
+                       "RX FIFO Trigger Level is configured incorrectly\n");
+                       return -EINVAL;
+               }
+               break;
+       case SSP_RX_32_OR_MORE_ELEM:
+               if (pl022->vendor->fifodepth < 32) {
+                       dev_err(&pl022->adev->dev,
+                       "RX FIFO Trigger Level is configured incorrectly\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
                dev_err(&pl022->adev->dev,
                        "RX FIFO Trigger Level is configured incorrectly\n");
                return -EINVAL;
+               break;
        }
-       if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
-           || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
+       switch (chip_info->tx_lev_trig) {
+       case SSP_TX_1_OR_MORE_EMPTY_LOC:
+       case SSP_TX_4_OR_MORE_EMPTY_LOC:
+       case SSP_TX_8_OR_MORE_EMPTY_LOC:
+               /* These are always OK, all variants can handle this */
+               break;
+       case SSP_TX_16_OR_MORE_EMPTY_LOC:
+               if (pl022->vendor->fifodepth < 16) {
+                       dev_err(&pl022->adev->dev,
+                       "TX FIFO Trigger Level is configured incorrectly\n");
+                       return -EINVAL;
+               }
+               break;
+       case SSP_TX_32_OR_MORE_EMPTY_LOC:
+               if (pl022->vendor->fifodepth < 32) {
+                       dev_err(&pl022->adev->dev,
+                       "TX FIFO Trigger Level is configured incorrectly\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
                dev_err(&pl022->adev->dev,
                        "TX FIFO Trigger Level is configured incorrectly\n");
                return -EINVAL;
+               break;
        }
        if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
                if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -1874,6 +1963,9 @@ static int pl022_setup(struct spi_device *spi)
                goto err_config_params;
        }
 
+       pl022->rx_lev_trig = chip_info->rx_lev_trig;
+       pl022->tx_lev_trig = chip_info->tx_lev_trig;
+
        /* Now set controller state based on controller data */
        chip->xfer_type = chip_info->com_mode;
        if (!chip_info->cs_control) {
@@ -2094,6 +2186,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        }
        printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
               adev->res.start, pl022->virtbase);
+       pm_runtime_enable(dev);
+       pm_runtime_resume(dev);
 
        pl022->clk = clk_get(&adev->dev, NULL);
        if (IS_ERR(pl022->clk)) {
@@ -2155,6 +2249,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        destroy_queue(pl022);
        pl022_dma_remove(pl022);
        free_irq(adev->irq[0], pl022);
+       pm_runtime_disable(&adev->dev);
  err_no_irq:
        clk_put(pl022->clk);
  err_no_clk:
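The new trigger-level handling in spi-pl022.c is driven entirely by the per-chip platform data (chip_info) that board code passes in. A hedged sketch of what such a configuration might look like, using only fields referenced in the hunks above; the values are examples, not taken from any real board file:

#include <linux/amba/pl022.h>

/* Example only: per-chip data a board file might hand to the pl022 driver. */
static struct pl022_config_chip example_spi_chip_info = {
	.com_mode	= INTERRUPT_TRANSFER,
	.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	/* An 8-entry trigger level maps to src/dst_maxburst = 8 in configure_dma() */
	.rx_lev_trig	= SSP_RX_8_OR_MORE_ELEM,
	.tx_lev_trig	= SSP_TX_8_OR_MORE_EMPTY_LOC,
	.cs_control	= NULL,	/* fall back to the driver's default CS handling */
};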
similarity index 99%
rename from drivers/spi/spi_ppc4xx.c
rename to drivers/spi/spi-ppc4xx.c
index 2a298c0..b267fd9 100644 (file)
@@ -502,7 +502,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
                goto free_gpios;
        }
        hw->mapbase = resource.start;
-       hw->mapsize = resource.end - resource.start + 1;
+       hw->mapsize = resource_size(&resource);
 
        /* Sanity check */
        if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
similarity index 99%
rename from drivers/spi/spi_s3c24xx_fiq.S
rename to drivers/spi/spi-s3c24xx-fiq.S
index 3793cae..059f2dc 100644 (file)
@@ -17,7 +17,7 @@
 #include <mach/regs-irq.h>
 #include <plat/regs-spi.h>
 
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
 
        .text
 
similarity index 98%
rename from drivers/spi/spi_s3c24xx_gpio.c
rename to drivers/spi/spi-s3c24xx-gpio.c
index be99135..2d3c085 100644 (file)
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c24xx_gpio.c
- *
+/*
  * Copyright (c) 2006 Ben Dooks
  * Copyright (c) 2006 Simtec Electronics
  *
@@ -58,7 +57,7 @@ static inline u32 getmiso(struct spi_device *dev)
 
 #define spidelay(x) ndelay(x)
 
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
 
 
 static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
similarity index 99%
rename from drivers/spi/spi_s3c24xx.c
rename to drivers/spi/spi-s3c24xx.c
index 1a5fcab..1996ac5 100644 (file)
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c24xx.c
- *
+/*
  * Copyright (c) 2006 Ben Dooks
  * Copyright 2006-2009 Simtec Electronics
  *     Ben Dooks <ben@simtec.co.uk>
@@ -32,7 +31,7 @@
 #include <plat/fiq.h>
 #include <asm/fiq.h>
 
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
 
 /**
  * s3c24xx_spi_devstate - per device data
similarity index 99%
rename from drivers/spi/spi_s3c64xx.c
rename to drivers/spi/spi-s3c64xx.c
index 795828b..75e3a9b 100644 (file)
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c64xx.c
- *
+/*
  * Copyright (C) 2009 Samsung Electronics Ltd.
  *     Jaswinder Singh <jassi.brar@samsung.com>
  *
similarity index 99%
rename from drivers/spi/spi_sh_sci.c
rename to drivers/spi/spi-sh-sci.c
index 5c64391..e7779c0 100644 (file)
@@ -78,7 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
 
 #define spidelay(x) ndelay(x)
 
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
 
 static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
                                      unsigned nsecs, u32 word, u8 bits)
similarity index 100%
rename from drivers/spi/spi_sh.c
rename to drivers/spi/spi-sh.c
similarity index 96%
rename from drivers/spi/spi_tegra.c
rename to drivers/spi/spi-tegra.c
index 6c3aa6e..a43ceeb 100644 (file)
@@ -498,14 +498,14 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       if (!request_mem_region(r->start, (r->end - r->start) + 1,
+       if (!request_mem_region(r->start, resource_size(r),
                                dev_name(&pdev->dev))) {
                ret = -EBUSY;
                goto err0;
        }
 
        tspi->phys = r->start;
-       tspi->base = ioremap(r->start, r->end - r->start + 1);
+       tspi->base = ioremap(r->start, resource_size(r));
        if (!tspi->base) {
                dev_err(&pdev->dev, "can't ioremap iomem\n");
                ret = -ENOMEM;
@@ -546,6 +546,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
        tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
        tspi->rx_dma_req.dev = tspi;
 
+       master->dev.of_node = pdev->dev.of_node;
        ret = spi_register_master(master);
 
        if (ret < 0)
@@ -563,7 +564,7 @@ err3:
 err2:
        iounmap(tspi->base);
 err1:
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
 err0:
        spi_master_put(master);
        return ret;
@@ -588,17 +589,28 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
        iounmap(tspi->base);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(r->start, (r->end - r->start) + 1);
+       release_mem_region(r->start, resource_size(r));
 
        return 0;
 }
 
 MODULE_ALIAS("platform:spi_tegra");
 
+#ifdef CONFIG_OF
+static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
+       { .compatible = "nvidia,tegra250-spi", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
+#else /* CONFIG_OF */
+#define spi_tegra_of_match_table NULL
+#endif /* CONFIG_OF */
+
 static struct platform_driver spi_tegra_driver = {
        .driver = {
                .name =         "spi_tegra",
                .owner =        THIS_MODULE,
+               .of_match_table = spi_tegra_of_match_table,
        },
        .remove =       __devexit_p(spi_tegra_remove),
 };
similarity index 99%
rename from drivers/spi/tle62x0.c
rename to drivers/spi/spi-tle62x0.c
index 32a4087..940e73d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * tle62x0.c -- support Infineon TLE62x0 driver chips
+ * Support Infineon TLE62x0 driver chips
  *
  * Copyright (c) 2007 Simtec Electronics
  *     Ben Dooks, <ben@simtec.co.uk>
similarity index 53%
rename from drivers/spi/spi_topcliff_pch.c
rename to drivers/spi/spi-topcliff-pch.c
index 79e48d4..1d23f38 100644 (file)
 #include <linux/spi/spidev.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
 
 /* Register offsets */
 #define PCH_SPCR               0x00    /* SPI control register */
@@ -35,6 +39,7 @@
 #define PCH_SPDRR              0x10    /* SPI read data register */
 #define PCH_SSNXCR             0x18    /* SSN Expand Control Register */
 #define PCH_SRST               0x1C    /* SPI reset register */
+#define PCH_ADDRESS_SIZE       0x20
 
 #define PCH_SPSR_TFD           0x000007C0
 #define PCH_SPSR_RFD           0x0000F800
@@ -52,8 +57,6 @@
 #define STATUS_EXITING         2
 #define PCH_SLEEP_TIME         10
 
-#define PCH_ADDRESS_SIZE       0x20
-
 #define SSN_LOW                        0x02U
 #define SSN_NO_CONTROL         0x00U
 #define PCH_MAX_CS             0xFF
 #define SPSR_TFI_BIT           (1 << 0)
 #define SPSR_RFI_BIT           (1 << 1)
 #define SPSR_FI_BIT            (1 << 2)
+#define SPSR_ORF_BIT           (1 << 3)
 #define SPBRR_SIZE_BIT         (1 << 10)
 
-#define PCH_ALL                        (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
+#define PCH_ALL                        (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
+                               SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
 
 #define SPCR_RFIC_FIELD                20
 #define SPCR_TFIC_FIELD                16
 
-#define SPSR_INT_BITS          0x1F
-#define MASK_SPBRR_SPBR_BITS   (~((1 << 10) - 1))
-#define MASK_RFIC_SPCR_BITS    (~(0xf << 20))
-#define MASK_TFIC_SPCR_BITS    (~(0xf000f << 12))
+#define MASK_SPBRR_SPBR_BITS   ((1 << 10) - 1)
+#define MASK_RFIC_SPCR_BITS    (0xf << SPCR_RFIC_FIELD)
+#define MASK_TFIC_SPCR_BITS    (0xf << SPCR_TFIC_FIELD)
 
 #define PCH_CLOCK_HZ           50000000
 #define PCH_MAX_SPBR           1023
 
+/* Definitions for ML7213/ML7223 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM             0x10DB
+#define PCI_DEVICE_ID_ML7213_SPI       0x802c
+#define PCI_DEVICE_ID_ML7223_SPI       0x800F
 
+/*
+ * Maximum number of SPI channels per device:
+ * Intel EG20T PCH :           1ch
+ * OKI SEMICONDUCTOR ML7213 IOH :      2ch
+ * OKI SEMICONDUCTOR ML7223 IOH :      1ch
+ */
+#define PCH_SPI_MAX_DEV                        2
+
+#define PCH_BUF_SIZE           4096
+#define PCH_DMA_TRANS_SIZE     12
+
+static int use_dma = 1;
+
+struct pch_spi_dma_ctrl {
+       struct dma_async_tx_descriptor  *desc_tx;
+       struct dma_async_tx_descriptor  *desc_rx;
+       struct pch_dma_slave            param_tx;
+       struct pch_dma_slave            param_rx;
+       struct dma_chan         *chan_tx;
+       struct dma_chan         *chan_rx;
+       struct scatterlist              *sg_tx_p;
+       struct scatterlist              *sg_rx_p;
+       struct scatterlist              sg_tx;
+       struct scatterlist              sg_rx;
+       int                             nent;
+       void                            *tx_buf_virt;
+       void                            *rx_buf_virt;
+       dma_addr_t                      tx_buf_dma;
+       dma_addr_t                      rx_buf_dma;
+};
 /**
  * struct pch_spi_data - Holds the SPI channel specific details
  * @io_remap_addr:             The remapped PCI base address
  * @cur_trans:                 The current transfer that this SPI driver is
  *                             handling
  * @board_dat:                 Reference to the SPI device data structure
+ * @plat_dev:                  platform_device structure
+ * @ch:                                SPI channel number
+ * @irq_reg_sts:               Status of IRQ registration
  */
 struct pch_spi_data {
        void __iomem *io_remap_addr;
+       unsigned long io_base_addr;
        struct spi_master *master;
        struct work_struct work;
        struct workqueue_struct *wk;
@@ -144,27 +186,36 @@ struct pch_spi_data {
        struct spi_message *current_msg;
        struct spi_transfer *cur_trans;
        struct pch_spi_board_data *board_dat;
+       struct platform_device  *plat_dev;
+       int ch;
+       struct pch_spi_dma_ctrl dma;
+       int use_dma;
+       u8 irq_reg_sts;
 };
 
 /**
  * struct pch_spi_board_data - Holds the SPI device specific details
  * @pdev:              Pointer to the PCI device
- * @irq_reg_sts:       Status of IRQ registration
- * @pci_req_sts:       Status of pci_request_regions
  * @suspend_sts:       Status of suspend
- * @data:              Pointer to SPI channel data structure
+ * @num:               The number of SPI device instance
  */
 struct pch_spi_board_data {
        struct pci_dev *pdev;
-       u8 irq_reg_sts;
-       u8 pci_req_sts;
        u8 suspend_sts;
-       struct pch_spi_data *data;
+       int num;
+};
+
+struct pch_pd_dev_save {
+       int num;
+       struct platform_device *pd_save[PCH_SPI_MAX_DEV];
+       struct pch_spi_board_data *board_dat;
 };
 
 static struct pci_device_id pch_spi_pcidev_id[] = {
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)},
-       {0,}
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+       { }
 };
 
 /**
@@ -251,10 +302,10 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
                        reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
 
                        /* reset rx threshold */
-                       reg_spcr_val &= MASK_RFIC_SPCR_BITS;
+                       reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
                        reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
-                       iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))),
-                                (io_remap_addr + PCH_SPCR));
+
+                       iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
                }
 
                /* update counts */
@@ -265,12 +316,15 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
 
        /* if transfer complete interrupt */
        if (reg_spsr_val & SPSR_FI_BIT) {
-               /* disable FI & RFI interrupts */
-               pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
-                                  SPCR_FIE_BIT | SPCR_RFIE_BIT);
+               if (tx_index < bpw_len)
+                       dev_err(&data->master->dev,
+                               "%s : Transfer is not completed\n", __func__);
+               /* disable interrupts */
+               pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
 
                /* transfer is completed;inform pch_spi_process_messages */
                data->transfer_complete = true;
+               data->transfer_active = false;
                wake_up(&data->wait);
        }
 }
@@ -283,24 +337,28 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
 static irqreturn_t pch_spi_handler(int irq, void *dev_id)
 {
        u32 reg_spsr_val;
-       struct pch_spi_data *data;
        void __iomem *spsr;
        void __iomem *io_remap_addr;
        irqreturn_t ret = IRQ_NONE;
-       struct pch_spi_board_data *board_dat = dev_id;
+       struct pch_spi_data *data = dev_id;
+       struct pch_spi_board_data *board_dat = data->board_dat;
 
        if (board_dat->suspend_sts) {
                dev_dbg(&board_dat->pdev->dev,
                        "%s returning due to suspend\n", __func__);
                return IRQ_NONE;
        }
+       if (data->use_dma)
+               return IRQ_NONE;
 
-       data = board_dat->data;
        io_remap_addr = data->io_remap_addr;
        spsr = io_remap_addr + PCH_SPSR;
 
        reg_spsr_val = ioread32(spsr);
 
+       if (reg_spsr_val & SPSR_ORF_BIT)
+               dev_err(&board_dat->pdev->dev, "%s Overrun error\n", __func__);
+
        /* Check if the interrupt is for SPI device */
        if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
                pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
@@ -326,7 +384,7 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
        if (n_spbr > PCH_MAX_SPBR)
                n_spbr = PCH_MAX_SPBR;
 
-       pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS);
+       pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
 }
 
 /**
@@ -435,26 +493,27 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
        dev_dbg(&pspi->dev, "%s Transfer List not empty. "
                "Transfer Speed is set.\n", __func__);
 
+       spin_lock_irqsave(&data->lock, flags);
        /* validate Tx/Rx buffers and Transfer length */
        list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
                if (!transfer->tx_buf && !transfer->rx_buf) {
                        dev_err(&pspi->dev,
                                "%s Tx and Rx buffer NULL\n", __func__);
                        retval = -EINVAL;
-                       goto err_out;
+                       goto err_return_spinlock;
                }
 
                if (!transfer->len) {
                        dev_err(&pspi->dev, "%s Transfer length invalid\n",
                                __func__);
                        retval = -EINVAL;
-                       goto err_out;
+                       goto err_return_spinlock;
                }
 
                dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
                        " valid\n", __func__);
 
-               /* if baud rate hs been specified validate the same */
+               /* if baud rate has been specified validate the same */
                if (transfer->speed_hz > PCH_MAX_BAUDRATE)
                        transfer->speed_hz = PCH_MAX_BAUDRATE;
 
@@ -465,25 +524,24 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
                                retval = -EINVAL;
                                dev_err(&pspi->dev,
                                        "%s Invalid bits per word\n", __func__);
-                               goto err_out;
+                               goto err_return_spinlock;
                        }
                }
        }
-
-       spin_lock_irqsave(&data->lock, flags);
+       spin_unlock_irqrestore(&data->lock, flags);
 
        /* We won't process any messages if we have been asked to terminate */
        if (data->status == STATUS_EXITING) {
                dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
                retval = -ESHUTDOWN;
-               goto err_return_spinlock;
+               goto err_out;
        }
 
        /* If suspended ,return -EINVAL */
        if (data->board_dat->suspend_sts) {
                dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
                retval = -EINVAL;
-               goto err_return_spinlock;
+               goto err_out;
        }
 
        /* set status of message */
@@ -491,9 +549,11 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
        dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
 
        pmsg->status = -EINPROGRESS;
-
+       spin_lock_irqsave(&data->lock, flags);
        /* add message to queue */
        list_add_tail(&pmsg->queue, &data->queue);
+       spin_unlock_irqrestore(&data->lock, flags);
+
        dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
 
        /* schedule work queue to run */
@@ -502,11 +562,13 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
 
        retval = 0;
 
-err_return_spinlock:
-       spin_unlock_irqrestore(&data->lock, flags);
 err_out:
        dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
        return retval;
+err_return_spinlock:
+       dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+       spin_unlock_irqrestore(&data->lock, flags);
+       return retval;
 }
 
 static inline void pch_spi_select_chip(struct pch_spi_data *data,
@@ -527,8 +589,7 @@ static inline void pch_spi_select_chip(struct pch_spi_data *data,
        pch_spi_setup_transfer(pspi);
 }
 
-static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
-                          struct spi_message **ppmsg)
+static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
 {
        int size;
        u32 n_writes;
@@ -537,8 +598,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
        const u8 *tx_buf;
        const u16 *tx_sbuf;
 
-       pmsg = *ppmsg;
-
        /* set baud rate if needed */
        if (data->cur_trans->speed_hz) {
                dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
@@ -621,10 +680,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
        data->transfer_active = true;
 }
 
-
-static void pch_spi_nomore_transfer(struct pch_spi_data *data,
-                                               struct spi_message *pmsg)
+static void pch_spi_nomore_transfer(struct pch_spi_data *data)
 {
+       struct spi_message *pmsg;
        dev_dbg(&data->master->dev, "%s called\n", __func__);
        /* Invoke complete callback
         * [To the spi core..indicating end of transfer] */
@@ -675,29 +733,21 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data,
 
 static void pch_spi_set_ir(struct pch_spi_data *data)
 {
-       /* enable interrupts */
-       if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
+       /* enable interrupts, set threshold, enable SPI */
+       if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
                /* set receive threshold to PCH_RX_THOLD */
                pch_spi_setclr_reg(data->master, PCH_SPCR,
-                                  PCH_RX_THOLD << SPCR_RFIC_FIELD,
-                                  ~MASK_RFIC_SPCR_BITS);
-               /* enable FI and RFI interrupts */
-               pch_spi_setclr_reg(data->master, PCH_SPCR,
-                                  SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
-       } else {
+                                  PCH_RX_THOLD << SPCR_RFIC_FIELD |
+                                  SPCR_FIE_BIT | SPCR_RFIE_BIT |
+                                  SPCR_ORIE_BIT | SPCR_SPE_BIT,
+                                  MASK_RFIC_SPCR_BITS | PCH_ALL);
+       else
                /* set receive threshold to maximum */
                pch_spi_setclr_reg(data->master, PCH_SPCR,
-                                  PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
-                                  ~MASK_TFIC_SPCR_BITS);
-               /* enable FI interrupt */
-               pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
-       }
-
-       dev_dbg(&data->master->dev,
-               "%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
-
-       /* SPI set enable */
-       pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
+                                  PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
+                                  SPCR_FIE_BIT | SPCR_ORIE_BIT |
+                                  SPCR_SPE_BIT,
+                                  MASK_RFIC_SPCR_BITS | PCH_ALL);
 
        /* Wait until the transfer completes; go to sleep after
                                 initiating the transfer. */
@@ -710,15 +760,13 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
        dev_dbg(&data->master->dev,
                "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
 
-       data->transfer_active = false;
-       dev_dbg(&data->master->dev,
-               "%s set data->transfer_active = false\n", __func__);
-
        /* clear all interrupts */
        pch_spi_writereg(data->master, PCH_SPSR,
                         pch_spi_readreg(data->master, PCH_SPSR));
-       /* disable interrupts */
-       pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+       /* Disable interrupts and SPI transfer */
+       pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+       /* clear FIFO */
+       pch_spi_clear_fifo(data->master);
 }
 
 static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -742,6 +790,327 @@ static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
        }
 }
 
+static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+{
+       int j;
+       u8 *rx_buf;
+       u16 *rx_sbuf;
+       const u8 *rx_dma_buf;
+       const u16 *rx_dma_sbuf;
+
+       /* copy Rx Data */
+       if (!data->cur_trans->rx_buf)
+               return;
+
+       if (bpw == 8) {
+               rx_buf = data->cur_trans->rx_buf;
+               rx_dma_buf = data->dma.rx_buf_virt;
+               for (j = 0; j < data->bpw_len; j++)
+                       *rx_buf++ = *rx_dma_buf++ & 0xFF;
+       } else {
+               rx_sbuf = data->cur_trans->rx_buf;
+               rx_dma_sbuf = data->dma.rx_buf_virt;
+               for (j = 0; j < data->bpw_len; j++)
+                       *rx_sbuf++ = *rx_dma_sbuf++;
+       }
+}
+
+static void pch_spi_start_transfer(struct pch_spi_data *data)
+{
+       struct pch_spi_dma_ctrl *dma;
+       unsigned long flags;
+
+       dma = &data->dma;
+
+       spin_lock_irqsave(&data->lock, flags);
+
+       /* disable interrupts, SPI set enable */
+       pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       /* Wait until the transfer completes; go to sleep after
+                                initiating the transfer. */
+       dev_dbg(&data->master->dev,
+               "%s:waiting for transfer to get over\n", __func__);
+       wait_event_interruptible(data->wait, data->transfer_complete);
+
+       dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+                           DMA_FROM_DEVICE);
+       async_tx_ack(dma->desc_rx);
+       async_tx_ack(dma->desc_tx);
+       kfree(dma->sg_tx_p);
+       kfree(dma->sg_rx_p);
+
+       spin_lock_irqsave(&data->lock, flags);
+       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+       dev_dbg(&data->master->dev,
+               "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
+
+       /* clear fifo threshold, disable interrupts, disable SPI transfer */
+       pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+                          MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
+                          SPCR_SPE_BIT);
+       /* clear all interrupts */
+       pch_spi_writereg(data->master, PCH_SPSR,
+                        pch_spi_readreg(data->master, PCH_SPSR));
+       /* clear FIFO */
+       pch_spi_clear_fifo(data->master);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+       struct pch_spi_data *data = arg;
+
+       /* transfer is completed;inform pch_spi_process_messages_dma */
+       data->transfer_complete = true;
+       wake_up_interruptible(&data->wait);
+}
+
+static bool pch_spi_filter(struct dma_chan *chan, void *slave)
+{
+       struct pch_dma_slave *param = slave;
+
+       if ((chan->chan_id == param->chan_id) &&
+           (param->dma_dev == chan->device->dev)) {
+               chan->private = param;
+               return true;
+       } else {
+               return false;
+       }
+}
+
+static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+{
+       dma_cap_mask_t mask;
+       struct dma_chan *chan;
+       struct pci_dev *dma_dev;
+       struct pch_dma_slave *param;
+       struct pch_spi_dma_ctrl *dma;
+       unsigned int width;
+
+       if (bpw == 8)
+               width = PCH_DMA_WIDTH_1_BYTE;
+       else
+               width = PCH_DMA_WIDTH_2_BYTES;
+
+       dma = &data->dma;
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+
+       /* Get DMA's dev information */
+       dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
+
+       /* Set Tx DMA */
+       param = &dma->param_tx;
+       param->dma_dev = &dma_dev->dev;
+       param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+       param->tx_reg = data->io_base_addr + PCH_SPDWR;
+       param->width = width;
+       chan = dma_request_channel(mask, pch_spi_filter, param);
+       if (!chan) {
+               dev_err(&data->master->dev,
+                       "ERROR: dma_request_channel FAILS(Tx)\n");
+               data->use_dma = 0;
+               return;
+       }
+       dma->chan_tx = chan;
+
+       /* Set Rx DMA */
+       param = &dma->param_rx;
+       param->dma_dev = &dma_dev->dev;
+       param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+       param->rx_reg = data->io_base_addr + PCH_SPDRR;
+       param->width = width;
+       chan = dma_request_channel(mask, pch_spi_filter, param);
+       if (!chan) {
+               dev_err(&data->master->dev,
+                       "ERROR: dma_request_channel FAILS(Rx)\n");
+               dma_release_channel(dma->chan_tx);
+               dma->chan_tx = NULL;
+               data->use_dma = 0;
+               return;
+       }
+       dma->chan_rx = chan;
+}
+
+static void pch_spi_release_dma(struct pch_spi_data *data)
+{
+       struct pch_spi_dma_ctrl *dma;
+
+       dma = &data->dma;
+       if (dma->chan_tx) {
+               dma_release_channel(dma->chan_tx);
+               dma->chan_tx = NULL;
+       }
+       if (dma->chan_rx) {
+               dma_release_channel(dma->chan_rx);
+               dma->chan_rx = NULL;
+       }
+       return;
+}
+
+static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+{
+       const u8 *tx_buf;
+       const u16 *tx_sbuf;
+       u8 *tx_dma_buf;
+       u16 *tx_dma_sbuf;
+       struct scatterlist *sg;
+       struct dma_async_tx_descriptor *desc_tx;
+       struct dma_async_tx_descriptor *desc_rx;
+       int num;
+       int i;
+       int size;
+       int rem;
+       unsigned long flags;
+       struct pch_spi_dma_ctrl *dma;
+
+       dma = &data->dma;
+
+       /* set baud rate if needed */
+       if (data->cur_trans->speed_hz) {
+               dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+               spin_lock_irqsave(&data->lock, flags);
+               pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+               spin_unlock_irqrestore(&data->lock, flags);
+       }
+
+       /* set bits per word if needed */
+       if (data->cur_trans->bits_per_word &&
+           (data->current_msg->spi->bits_per_word !=
+            data->cur_trans->bits_per_word)) {
+               dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+               spin_lock_irqsave(&data->lock, flags);
+               pch_spi_set_bits_per_word(data->master,
+                                         data->cur_trans->bits_per_word);
+               spin_unlock_irqrestore(&data->lock, flags);
+               *bpw = data->cur_trans->bits_per_word;
+       } else {
+               *bpw = data->current_msg->spi->bits_per_word;
+       }
+       data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+       /* copy Tx Data */
+       if (data->cur_trans->tx_buf != NULL) {
+               if (*bpw == 8) {
+                       tx_buf = data->cur_trans->tx_buf;
+                       tx_dma_buf = dma->tx_buf_virt;
+                       for (i = 0; i < data->bpw_len; i++)
+                               *tx_dma_buf++ = *tx_buf++;
+               } else {
+                       tx_sbuf = data->cur_trans->tx_buf;
+                       tx_dma_sbuf = dma->tx_buf_virt;
+                       for (i = 0; i < data->bpw_len; i++)
+                               *tx_dma_sbuf++ = *tx_sbuf++;
+               }
+       }
+       if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+               num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+               size = PCH_DMA_TRANS_SIZE;
+               rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+       } else {
+               num = 1;
+               size = data->bpw_len;
+               rem = data->bpw_len;
+       }
+       dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+               __func__, num, size, rem);
+       spin_lock_irqsave(&data->lock, flags);
+
+       /* set receive fifo threshold and transmit fifo threshold */
+       pch_spi_setclr_reg(data->master, PCH_SPCR,
+                          ((size - 1) << SPCR_RFIC_FIELD) |
+                          ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
+                           SPCR_TFIC_FIELD),
+                          MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       /* RX */
+       dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+       sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+       /* offset, length setting */
+       sg = dma->sg_rx_p;
+       for (i = 0; i < num; i++, sg++) {
+               if (i == 0) {
+                       sg->offset = 0;
+                       sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
+                                   sg->offset);
+                       sg_dma_len(sg) = rem;
+               } else {
+                       sg->offset = rem + size * (i - 1);
+                       sg->offset = sg->offset * (*bpw / 8);
+                       sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+                                   sg->offset);
+                       sg_dma_len(sg) = size;
+               }
+               sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
+       }
+       sg = dma->sg_rx_p;
+       desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
+                                       num, DMA_FROM_DEVICE,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc_rx) {
+               dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+                       __func__);
+               return;
+       }
+       dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+       desc_rx->callback = pch_dma_rx_complete;
+       desc_rx->callback_param = data;
+       dma->nent = num;
+       dma->desc_rx = desc_rx;
+
+       /* TX */
+       dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+       sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+       /* offset, length setting */
+       sg = dma->sg_tx_p;
+       for (i = 0; i < num; i++, sg++) {
+               if (i == 0) {
+                       sg->offset = 0;
+                       sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+                                   sg->offset);
+                       sg_dma_len(sg) = rem;
+               } else {
+                       sg->offset = rem + size * (i - 1);
+                       sg->offset = sg->offset * (*bpw / 8);
+                       sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+                                   sg->offset);
+                       sg_dma_len(sg) = size;
+               }
+               sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
+       }
+       sg = dma->sg_tx_p;
+       desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
+                                       sg, num, DMA_TO_DEVICE,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc_tx) {
+               dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+                       __func__);
+               return;
+       }
+       dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+       desc_tx->callback = NULL;
+       desc_tx->callback_param = data;
+       dma->nent = num;
+       dma->desc_tx = desc_tx;
+
+       dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
+               "0x2 to SSNXCR\n", __func__);
+
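+       /*
+        * Assert SSN (chip select) and submit the RX descriptor ahead of
+        * the TX descriptor, all under the lock, so the receiver is armed
+        * before data starts flowing.
+        */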
+       spin_lock_irqsave(&data->lock, flags);
+       pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+       desc_rx->tx_submit(desc_rx);
+       desc_tx->tx_submit(desc_tx);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       /* reset transfer complete flag */
+       data->transfer_complete = false;
+}
 
 static void pch_spi_process_messages(struct work_struct *pwork)
 {
@@ -753,13 +1122,10 @@ static void pch_spi_process_messages(struct work_struct *pwork)
        dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
 
        spin_lock(&data->lock);
-
        /* check if suspend has been initiated;if yes flush queue */
        if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
-               dev_dbg(&data->master->dev,
-                       "%s suspend/remove initiated,flushing queue\n",
-                       __func__);
-
+               dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
+                       "flushing queue\n", __func__);
                list_for_each_entry(pmsg, data->queue.next, queue) {
                        pmsg->status = -EIO;
 
@@ -793,53 +1159,42 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
        spin_unlock(&data->lock);
 
+       if (data->use_dma)
+               pch_spi_request_dma(data,
+                                   data->current_msg->spi->bits_per_word);
        do {
                /* If we are already processing a message get the next
                transfer structure from the message otherwise retrieve
                the 1st transfer request from the message. */
                spin_lock(&data->lock);
-
                if (data->cur_trans == NULL) {
                        data->cur_trans =
-                           list_entry(data->current_msg->transfers.
-                                      next, struct spi_transfer,
-                                      transfer_list);
-                       dev_dbg(&data->master->dev,
-                               "%s :Getting 1st transfer message\n", __func__);
+                               list_entry(data->current_msg->transfers.next,
+                                          struct spi_transfer, transfer_list);
+                       dev_dbg(&data->master->dev, "%s "
+                               ":Getting 1st transfer message\n", __func__);
                } else {
                        data->cur_trans =
-                           list_entry(data->cur_trans->transfer_list.next,
-                                      struct spi_transfer,
-                                      transfer_list);
-                       dev_dbg(&data->master->dev,
-                               "%s :Getting next transfer message\n",
-                               __func__);
+                               list_entry(data->cur_trans->transfer_list.next,
+                                          struct spi_transfer, transfer_list);
+                       dev_dbg(&data->master->dev, "%s "
+                               ":Getting next transfer message\n", __func__);
                }
-
                spin_unlock(&data->lock);
 
-               pch_spi_set_tx(data, &bpw, &pmsg);
-
-               /* Control interrupt*/
-               pch_spi_set_ir(data);
-
-               /* Disable SPI transfer */
-               pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0,
-                                  SPCR_SPE_BIT);
-
-               /* clear FIFO */
-               pch_spi_clear_fifo(data->master);
-
-               /* copy Rx Data */
-               pch_spi_copy_rx_data(data, bpw);
-
-               /* free memory */
-               kfree(data->pkt_rx_buff);
-               data->pkt_rx_buff = NULL;
-
-               kfree(data->pkt_tx_buff);
-               data->pkt_tx_buff = NULL;
-
+               if (data->use_dma) {
+                       pch_spi_handle_dma(data, &bpw);
+                       pch_spi_start_transfer(data);
+                       pch_spi_copy_rx_data_for_dma(data, bpw);
+               } else {
+                       pch_spi_set_tx(data, &bpw);
+                       pch_spi_set_ir(data);
+                       pch_spi_copy_rx_data(data, bpw);
+                       kfree(data->pkt_rx_buff);
+                       data->pkt_rx_buff = NULL;
+                       kfree(data->pkt_tx_buff);
+                       data->pkt_tx_buff = NULL;
+               }
                /* increment message count */
                data->current_msg->actual_length += data->cur_trans->len;
 
@@ -860,125 +1215,60 @@ static void pch_spi_process_messages(struct work_struct *pwork)
                /* No more transfer in this message. */
                if ((data->cur_trans->transfer_list.next) ==
                    &(data->current_msg->transfers)) {
-                       pch_spi_nomore_transfer(data, pmsg);
+                       pch_spi_nomore_transfer(data);
                }
 
                spin_unlock(&data->lock);
 
        } while (data->cur_trans != NULL);
+
+       if (data->use_dma)
+               pch_spi_release_dma(data);
 }
 
-static void pch_spi_free_resources(struct pch_spi_board_data *board_dat)
+static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
+                                  struct pch_spi_data *data)
 {
        dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
 
        /* free workqueue */
-       if (board_dat->data->wk != NULL) {
-               destroy_workqueue(board_dat->data->wk);
-               board_dat->data->wk = NULL;
+       if (data->wk != NULL) {
+               destroy_workqueue(data->wk);
+               data->wk = NULL;
                dev_dbg(&board_dat->pdev->dev,
                        "%s destroy_workqueue invoked successfully\n",
                        __func__);
        }
-
-       /* disable interrupts & free IRQ */
-       if (board_dat->irq_reg_sts) {
-               /* disable interrupts */
-               pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
-                                  PCH_ALL);
-
-               /* free IRQ */
-               free_irq(board_dat->pdev->irq, board_dat);
-
-               dev_dbg(&board_dat->pdev->dev,
-                       "%s free_irq invoked successfully\n", __func__);
-
-               board_dat->irq_reg_sts = false;
-       }
-
-       /* unmap PCI base address */
-       if (board_dat->data->io_remap_addr != 0) {
-               pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
-
-               board_dat->data->io_remap_addr = 0;
-
-               dev_dbg(&board_dat->pdev->dev,
-                       "%s pci_iounmap invoked successfully\n", __func__);
-       }
-
-       /* release PCI region */
-       if (board_dat->pci_req_sts) {
-               pci_release_regions(board_dat->pdev);
-               dev_dbg(&board_dat->pdev->dev,
-                       "%s pci_release_regions invoked successfully\n",
-                       __func__);
-               board_dat->pci_req_sts = false;
-       }
 }
 
-static int pch_spi_get_resources(struct pch_spi_board_data *board_dat)
+static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
+                                struct pch_spi_data *data)
 {
-       void __iomem *io_remap_addr;
-       int retval;
+       int retval = 0;
+
        dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
 
        /* create workqueue */
-       board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
-       if (!board_dat->data->wk) {
+       data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
+       if (!data->wk) {
                dev_err(&board_dat->pdev->dev,
                        "%s create_singlet hread_workqueue failed\n", __func__);
                retval = -EBUSY;
                goto err_return;
        }
 
-       dev_dbg(&board_dat->pdev->dev,
-               "%s create_singlethread_workqueue success\n", __func__);
-
-       retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
-       if (retval != 0) {
-               dev_err(&board_dat->pdev->dev,
-                       "%s request_region failed\n", __func__);
-               goto err_return;
-       }
-
-       board_dat->pci_req_sts = true;
-
-       io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
-       if (io_remap_addr == 0) {
-               dev_err(&board_dat->pdev->dev,
-                       "%s pci_iomap failed\n", __func__);
-               retval = -ENOMEM;
-               goto err_return;
-       }
-
-       /* calculate base address for all channels */
-       board_dat->data->io_remap_addr = io_remap_addr;
-
        /* reset PCH SPI h/w */
-       pch_spi_reset(board_dat->data->master);
+       pch_spi_reset(data->master);
        dev_dbg(&board_dat->pdev->dev,
                "%s pch_spi_reset invoked successfully\n", __func__);
 
-       /* register IRQ */
-       retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
-                            IRQF_SHARED, KBUILD_MODNAME, board_dat);
-       if (retval != 0) {
-               dev_err(&board_dat->pdev->dev,
-                       "%s request_irq failed\n", __func__);
-               goto err_return;
-       }
-
-       dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
-               __func__, retval);
-
-       board_dat->irq_reg_sts = true;
        dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
 
 err_return:
        if (retval != 0) {
                dev_err(&board_dat->pdev->dev,
                        "%s FAIL:invoking pch_spi_free_resources\n", __func__);
-               pch_spi_free_resources(board_dat);
+               pch_spi_free_resources(board_dat, data);
        }
 
        dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
@@ -986,255 +1276,387 @@ err_return:
        return retval;
 }
 
-static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+                            struct pch_spi_data *data)
 {
+       struct pch_spi_dma_ctrl *dma;
+
+       dma = &data->dma;
+       if (dma->tx_buf_dma)
+               dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+                                 dma->tx_buf_virt, dma->tx_buf_dma);
+       if (dma->rx_buf_dma)
+               dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+                                 dma->rx_buf_virt, dma->rx_buf_dma);
+       return;
+}
 
-       struct spi_master *master;
-
-       struct pch_spi_board_data *board_dat;
-       int retval;
-
-       dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
-
-       /* allocate memory for private data */
-       board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
-       if (board_dat == NULL) {
-               dev_err(&pdev->dev,
-                       " %s memory allocation for private data failed\n",
-                       __func__);
-               retval = -ENOMEM;
-               goto err_kmalloc;
-       }
+static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+                             struct pch_spi_data *data)
+{
+       struct pch_spi_dma_ctrl *dma;
+
+       dma = &data->dma;
+       /* Get Consistent memory for Tx DMA */
+       dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+                               PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+       /* Get Consistent memory for Rx DMA */
+       dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+                               PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+}
 
-       dev_dbg(&pdev->dev,
-               "%s memory allocation for private data success\n", __func__);
+static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+{
+       int ret;
+       struct spi_master *master;
+       struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+       struct pch_spi_data *data;
 
-       /* enable PCI device */
-       retval = pci_enable_device(pdev);
-       if (retval != 0) {
-               dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
+       dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
 
-               goto err_pci_en_device;
+       master = spi_alloc_master(&board_dat->pdev->dev,
+                                 sizeof(struct pch_spi_data));
+       if (!master) {
+               dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+                       plat_dev->id);
+               return -ENOMEM;
        }
 
-       dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
-               __func__, retval);
+       data = spi_master_get_devdata(master);
+       data->master = master;
 
-       board_dat->pdev = pdev;
+       platform_set_drvdata(plat_dev, data);
 
-       /* alllocate memory for SPI master */
-       master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
-       if (master == NULL) {
-               retval = -ENOMEM;
-               dev_err(&pdev->dev, "%s Fail.\n", __func__);
-               goto err_spi_alloc_master;
+       /* base address + per-channel address offset */
+       data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
+                                        PCH_ADDRESS_SIZE * plat_dev->id;
+       data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
+                                        PCH_ADDRESS_SIZE * plat_dev->id;
+       if (!data->io_remap_addr) {
+               dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
+               ret = -ENOMEM;
+               goto err_pci_iomap;
        }
 
-       dev_dbg(&pdev->dev,
-               "%s spi_alloc_master returned non NULL\n", __func__);
+       dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
+               plat_dev->id, data->io_remap_addr);
 
        /* initialize members of SPI master */
        master->bus_num = -1;
        master->num_chipselect = PCH_MAX_CS;
        master->setup = pch_spi_setup;
        master->transfer = pch_spi_transfer;
-       dev_dbg(&pdev->dev,
-               "%s transfer member of SPI master initialized\n", __func__);
-
-       board_dat->data = spi_master_get_devdata(master);
-
-       board_dat->data->master = master;
-       board_dat->data->n_curnt_chip = 255;
-       board_dat->data->board_dat = board_dat;
-       board_dat->data->status = STATUS_RUNNING;
-
-       INIT_LIST_HEAD(&board_dat->data->queue);
-       spin_lock_init(&board_dat->data->lock);
-       INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
-       init_waitqueue_head(&board_dat->data->wait);
 
-       /* allocate resources for PCH SPI */
-       retval = pch_spi_get_resources(board_dat);
-       if (retval) {
-               dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
+       data->board_dat = board_dat;
+       data->plat_dev = plat_dev;
+       data->n_curnt_chip = 255;
+       data->status = STATUS_RUNNING;
+       data->ch = plat_dev->id;
+       data->use_dma = use_dma;
+
+       INIT_LIST_HEAD(&data->queue);
+       spin_lock_init(&data->lock);
+       INIT_WORK(&data->work, pch_spi_process_messages);
+       init_waitqueue_head(&data->wait);
+
+       ret = pch_spi_get_resources(board_dat, data);
+       if (ret) {
+               dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
                goto err_spi_get_resources;
        }
 
-       dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
-               __func__, retval);
-
-       /* save private data in dev */
-       pci_set_drvdata(pdev, board_dat);
-       dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
+       ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
+                         IRQF_SHARED, KBUILD_MODNAME, data);
+       if (ret) {
+               dev_err(&plat_dev->dev,
+                       "%s request_irq failed\n", __func__);
+               goto err_request_irq;
+       }
+       data->irq_reg_sts = true;
 
-       /* set master mode */
        pch_spi_set_master_mode(master);
-       dev_dbg(&pdev->dev,
-               "%s invoked pch_spi_set_master_mode\n", __func__);
 
-       /* Register the controller with the SPI core. */
-       retval = spi_register_master(master);
-       if (retval != 0) {
-               dev_err(&pdev->dev,
+       ret = spi_register_master(master);
+       if (ret != 0) {
+               dev_err(&plat_dev->dev,
                        "%s spi_register_master FAILED\n", __func__);
-               goto err_spi_reg_master;
+               goto err_spi_register_master;
        }
 
-       dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
-               __func__, retval);
-
+       if (use_dma) {
+               dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+               pch_alloc_dma_buf(board_dat, data);
+       }
 
        return 0;
 
-err_spi_reg_master:
-       spi_unregister_master(master);
+err_spi_register_master:
+       free_irq(board_dat->pdev->irq, data);
+err_request_irq:
+       pch_spi_free_resources(board_dat, data);
 err_spi_get_resources:
-err_spi_alloc_master:
+       pci_iounmap(board_dat->pdev, data->io_remap_addr);
+err_pci_iomap:
        spi_master_put(master);
-       pci_disable_device(pdev);
-err_pci_en_device:
-       kfree(board_dat);
-err_kmalloc:
-       return retval;
+
+       return ret;
 }
 
-static void pch_spi_remove(struct pci_dev *pdev)
+static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
 {
-       struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+       struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+       struct pch_spi_data *data = platform_get_drvdata(plat_dev);
        int count;
+       unsigned long flags;
 
-       dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+       dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
+               __func__, plat_dev->id, board_dat->pdev->irq);
 
-       if (!board_dat) {
-               dev_err(&pdev->dev,
-                       "%s pci_get_drvdata returned NULL\n", __func__);
-               return;
-       }
+       if (use_dma)
+               pch_free_dma_buf(board_dat, data);
 
        /* check for any pending messages; no action is taken if the queue
         * is still full; but at least we tried.  Unload anyway */
        count = 500;
-       spin_lock(&board_dat->data->lock);
-       board_dat->data->status = STATUS_EXITING;
-       while ((list_empty(&board_dat->data->queue) == 0) && --count) {
+       spin_lock_irqsave(&data->lock, flags);
+       data->status = STATUS_EXITING;
+       while ((list_empty(&data->queue) == 0) && --count) {
                dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
                        __func__);
-               spin_unlock(&board_dat->data->lock);
+               spin_unlock_irqrestore(&data->lock, flags);
                msleep(PCH_SLEEP_TIME);
-               spin_lock(&board_dat->data->lock);
+               spin_lock_irqsave(&data->lock, flags);
        }
-       spin_unlock(&board_dat->data->lock);
-
-       /* Free resources allocated for PCH SPI */
-       pch_spi_free_resources(board_dat);
-
-       spi_unregister_master(board_dat->data->master);
-
-       /* free memory for private data */
-       kfree(board_dat);
+       spin_unlock_irqrestore(&data->lock, flags);
 
-       pci_set_drvdata(pdev, NULL);
+       pch_spi_free_resources(board_dat, data);
+       /* disable interrupts & free IRQ */
+       if (data->irq_reg_sts) {
+               /* disable interrupts */
+               pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+               data->irq_reg_sts = false;
+               free_irq(board_dat->pdev->irq, data);
+       }
 
-       /* disable PCI device */
-       pci_disable_device(pdev);
+       pci_iounmap(board_dat->pdev, data->io_remap_addr);
+       spi_unregister_master(data->master);
+       spi_master_put(data->master);
+       platform_set_drvdata(plat_dev, NULL);
 
-       dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
+       return 0;
 }
-
 #ifdef CONFIG_PM
-static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pch_spi_pd_suspend(struct platform_device *pd_dev,
+                             pm_message_t state)
 {
        u8 count;
-       int retval;
-
-       struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+       struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+       struct pch_spi_data *data = platform_get_drvdata(pd_dev);
 
-       dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+       dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
 
        if (!board_dat) {
-               dev_err(&pdev->dev,
+               dev_err(&pd_dev->dev,
                        "%s pci_get_drvdata returned NULL\n", __func__);
                return -EFAULT;
        }
 
-       retval = 0;
-       board_dat->suspend_sts = true;
-
        /* check if the current message is processed:
           Only after that's done will the transfer be suspended */
        count = 255;
        while ((--count) > 0) {
-               if (!(board_dat->data->bcurrent_msg_processing)) {
-                       dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
-                               "msg_processing = false\n", __func__);
+               if (!(data->bcurrent_msg_processing))
                        break;
-               } else {
-                       dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
-                               "processing = true\n", __func__);
-               }
                msleep(PCH_SLEEP_TIME);
        }
 
        /* Free IRQ */
-       if (board_dat->irq_reg_sts) {
+       if (data->irq_reg_sts) {
                /* disable all interrupts */
-               pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
-                                  PCH_ALL);
-               pch_spi_reset(board_dat->data->master);
-
-               free_irq(board_dat->pdev->irq, board_dat);
+               pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+               pch_spi_reset(data->master);
+               free_irq(board_dat->pdev->irq, data);
 
-               board_dat->irq_reg_sts = false;
-               dev_dbg(&pdev->dev,
+               data->irq_reg_sts = false;
+               dev_dbg(&pd_dev->dev,
                        "%s free_irq invoked successfully.\n", __func__);
        }
 
+       return 0;
+}
+
+static int pch_spi_pd_resume(struct platform_device *pd_dev)
+{
+       struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+       struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+       int retval;
+
+       if (!board_dat) {
+               dev_err(&pd_dev->dev,
+                       "%s pci_get_drvdata returned NULL\n", __func__);
+               return -EFAULT;
+       }
+
+       if (!data->irq_reg_sts) {
+               /* register IRQ */
+               retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+                                    IRQF_SHARED, KBUILD_MODNAME, data);
+               if (retval < 0) {
+                       dev_err(&pd_dev->dev,
+                               "%s request_irq failed\n", __func__);
+                       return retval;
+               }
+
+               /* reset PCH SPI h/w */
+               pch_spi_reset(data->master);
+               pch_spi_set_master_mode(data->master);
+               data->irq_reg_sts = true;
+       }
+       return 0;
+}
+#else
+#define pch_spi_pd_suspend NULL
+#define pch_spi_pd_resume NULL
+#endif
+
+static struct platform_driver pch_spi_pd_driver = {
+       .driver = {
+               .name = "pch-spi",
+               .owner = THIS_MODULE,
+       },
+       .probe = pch_spi_pd_probe,
+       .remove = __devexit_p(pch_spi_pd_remove),
+       .suspend = pch_spi_pd_suspend,
+       .resume = pch_spi_pd_resume
+};
+
+static int __devinit pch_spi_probe(struct pci_dev *pdev,
+                                  const struct pci_device_id *id)
+{
+       struct pch_spi_board_data *board_dat;
+       struct platform_device *pd_dev = NULL;
+       int retval;
+       int i;
+       struct pch_pd_dev_save *pd_dev_save;
+
+       pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+       if (!pd_dev_save) {
+               dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
+               return -ENOMEM;
+       }
+
+       board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+       if (!board_dat) {
+               dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
+               retval = -ENOMEM;
+               goto err_no_mem;
+       }
+
+       retval = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (retval) {
+               dev_err(&pdev->dev, "%s request_region failed\n", __func__);
+               goto pci_request_regions;
+       }
+
+       board_dat->pdev = pdev;
+       board_dat->num = id->driver_data;
+       pd_dev_save->num = id->driver_data;
+       pd_dev_save->board_dat = board_dat;
+
+       retval = pci_enable_device(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
+               goto pci_enable_device;
+       }
+
+       for (i = 0; i < board_dat->num; i++) {
+               pd_dev = platform_device_alloc("pch-spi", i);
+               if (!pd_dev) {
+                       dev_err(&pdev->dev, "platform_device_alloc failed\n");
+                       goto err_platform_device;
+               }
+               pd_dev_save->pd_save[i] = pd_dev;
+               pd_dev->dev.parent = &pdev->dev;
+
+               retval = platform_device_add_data(pd_dev, board_dat,
+                                                 sizeof(*board_dat));
+               if (retval) {
+                       dev_err(&pdev->dev,
+                               "platform_device_add_data failed\n");
+                       platform_device_put(pd_dev);
+                       goto err_platform_device;
+               }
+
+               retval = platform_device_add(pd_dev);
+               if (retval) {
+                       dev_err(&pdev->dev, "platform_device_add failed\n");
+                       platform_device_put(pd_dev);
+                       goto err_platform_device;
+               }
+       }
+
+       pci_set_drvdata(pdev, pd_dev_save);
+
+       return 0;
+
+err_platform_device:
+       pci_disable_device(pdev);
+pci_enable_device:
+       pci_release_regions(pdev);
+pci_request_regions:
+       kfree(board_dat);
+err_no_mem:
+       kfree(pd_dev_save);
+
+       return retval;
+}
+
+static void __devexit pch_spi_remove(struct pci_dev *pdev)
+{
+       int i;
+       struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
+
+       for (i = 0; i < pd_dev_save->num; i++)
+               platform_device_unregister(pd_dev_save->pd_save[i]);
+
+       pci_disable_device(pdev);
+       pci_release_regions(pdev);
+       kfree(pd_dev_save->board_dat);
+       kfree(pd_dev_save);
+}
+
+#ifdef CONFIG_PM
+static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       int retval;
+       struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+       pd_dev_save->board_dat->suspend_sts = true;
+
        /* save config space */
        retval = pci_save_state(pdev);
-
        if (retval == 0) {
-               dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
-                       __func__, retval);
-               /* disable PM notifications */
                pci_enable_wake(pdev, PCI_D3hot, 0);
-               dev_dbg(&pdev->dev,
-                       "%s pci_enable_wake invoked successfully\n", __func__);
-               /* disable PCI device */
                pci_disable_device(pdev);
-               dev_dbg(&pdev->dev,
-                       "%s pci_disable_device invoked successfully\n",
-                       __func__);
-               /* move device to D3hot  state */
                pci_set_power_state(pdev, PCI_D3hot);
-               dev_dbg(&pdev->dev,
-                       "%s pci_set_power_state invoked successfully\n",
-                       __func__);
        } else {
                dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
        }
 
-       dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
-
        return retval;
 }
 
 static int pch_spi_resume(struct pci_dev *pdev)
 {
        int retval;
-
-       struct pch_spi_board_data *board = pci_get_drvdata(pdev);
+       struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
        dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
 
-       if (!board) {
-               dev_err(&pdev->dev,
-                       "%s pci_get_drvdata returned NULL\n", __func__);
-               return -EFAULT;
-       }
-
-       /* move device to DO power state */
        pci_set_power_state(pdev, PCI_D0);
-
-       /* restore state */
        pci_restore_state(pdev);
 
        retval = pci_enable_device(pdev);
@@ -1242,34 +1664,12 @@ static int pch_spi_resume(struct pci_dev *pdev)
                dev_err(&pdev->dev,
                        "%s pci_enable_device failed\n", __func__);
        } else {
-               /* disable PM notifications */
                pci_enable_wake(pdev, PCI_D3hot, 0);
 
-               /* register IRQ handler */
-               if (!board->irq_reg_sts) {
-                       /* register IRQ */
-                       retval = request_irq(board->pdev->irq, pch_spi_handler,
-                                            IRQF_SHARED, KBUILD_MODNAME,
-                                            board);
-                       if (retval < 0) {
-                               dev_err(&pdev->dev,
-                                       "%s request_irq failed\n", __func__);
-                               return retval;
-                       }
-                       board->irq_reg_sts = true;
-
-                       /* reset PCH SPI h/w */
-                       pch_spi_reset(board->data->master);
-                       pch_spi_set_master_mode(board->data->master);
-
-                       /* set suspend status to false */
-                       board->suspend_sts = false;
-
-               }
+               /* set suspend status to false */
+               pd_dev_save->board_dat->suspend_sts = false;
        }
 
-       dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
-
        return retval;
 }
 #else
@@ -1289,15 +1689,29 @@ static struct pci_driver pch_spi_pcidev = {
 
 static int __init pch_spi_init(void)
 {
-       return pci_register_driver(&pch_spi_pcidev);
+       int ret;
+       ret = platform_driver_register(&pch_spi_pd_driver);
+       if (ret)
+               return ret;
+
+       ret = pci_register_driver(&pch_spi_pcidev);
+       if (ret) {
+               platform_driver_unregister(&pch_spi_pd_driver);
+               return ret;
+       }
+
+       return 0;
 }
 module_init(pch_spi_init);
 
 static void __exit pch_spi_exit(void)
 {
        pci_unregister_driver(&pch_spi_pcidev);
+       platform_driver_unregister(&pch_spi_pd_driver);
 }
 module_exit(pch_spi_exit);
 
+module_param(use_dma, int, 0644);
+MODULE_PARM_DESC(use_dma,
+                "to use DMA for data transfers pass 1 else 0; default 1");
+
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
similarity index 99%
rename from drivers/spi/spi_txx9.c
rename to drivers/spi/spi-txx9.c
index dfa024b..f0a2ab0 100644
@@ -1,5 +1,5 @@
 /*
- * spi_txx9.c - TXx9 SPI controller driver.
+ * TXx9 SPI controller driver.
  *
  * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
  * Copyright (C) 2000-2001 Toshiba Corporation
index 2e13a14..4d1b9f5 100644
@@ -1,5 +1,5 @@
 /*
- * spi.c - SPI init/core code
+ * SPI init/core code
  *
  * Copyright (C) 2005 David Brownell
  *
index d9fd862..830adbe 100644
@@ -1,5 +1,5 @@
 /*
- * spidev.c -- simple synchronous userspace interface to SPI devices
+ * Simple synchronous userspace interface to SPI devices
  *
  * Copyright (C) 2006 SWAPP
  *     Andrea Paterniani <a.paterniani@swapp-eng.it>
index 104e95c..c7417c7 100644
@@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
 
 static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = {
        .name           = "ac97-pcm-out",
-       .dma_port       = EP93XX_DMA_M2P_PORT_AAC1,
+       .dma_port       = EP93XX_DMA_AAC1,
 };
 
 static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = {
        .name           = "ac97-pcm-in",
-       .dma_port       = EP93XX_DMA_M2P_PORT_AAC1,
+       .dma_port       = EP93XX_DMA_AAC1,
 };
 
 static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info,
index 042f4e9..30df425 100644
@@ -70,11 +70,11 @@ struct ep93xx_i2s_info {
 struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = {
        [SNDRV_PCM_STREAM_PLAYBACK] = {
                .name           = "i2s-pcm-out",
-               .dma_port       = EP93XX_DMA_M2P_PORT_I2S1,
+               .dma_port       = EP93XX_DMA_I2S1,
        },
        [SNDRV_PCM_STREAM_CAPTURE] = {
                .name           = "i2s-pcm-in",
-               .dma_port       = EP93XX_DMA_M2P_PORT_I2S1,
+               .dma_port       = EP93XX_DMA_I2S1,
        },
 };
 
index a456e49..a07f99c 100644
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 
 #include <sound/core.h>
@@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
 
 struct ep93xx_runtime_data
 {
-       struct ep93xx_dma_m2p_client    cl;
-       struct ep93xx_pcm_dma_params    *params;
        int                             pointer_bytes;
-       struct tasklet_struct           period_tasklet;
        int                             periods;
-       struct ep93xx_dma_buffer        buf[32];
+       int                             period_bytes;
+       struct dma_chan                 *dma_chan;
+       struct ep93xx_dma_data          dma_data;
 };
 
-static void ep93xx_pcm_period_elapsed(unsigned long data)
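+/*
+ * Per-period DMA completion callback: advance the stream pointer by one
+ * period, wrapping at the end of the buffer, and notify ALSA.
+ */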
+static void ep93xx_pcm_dma_callback(void *data)
 {
-       struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
-       snd_pcm_period_elapsed(substream);
-}
+       struct snd_pcm_substream *substream = data;
+       struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
 
-static void ep93xx_pcm_buffer_started(void *cookie,
-                                     struct ep93xx_dma_buffer *buf)
-{
+       rtd->pointer_bytes += rtd->period_bytes;
+       rtd->pointer_bytes %= rtd->period_bytes * rtd->periods;
+
+       snd_pcm_period_elapsed(substream);
 }
 
-static void ep93xx_pcm_buffer_finished(void *cookie, 
-                                      struct ep93xx_dma_buffer *buf, 
-                                      int bytes, int error)
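+/*
+ * dmaengine filter callback: accept a channel whose direction matches the
+ * stream and attach our ep93xx_dma_data as its private data.
+ */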
+static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
 {
-       struct snd_pcm_substream *substream = cookie;
-       struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
-
-       if (buf == rtd->buf + rtd->periods - 1)
-               rtd->pointer_bytes = 0;
-       else
-               rtd->pointer_bytes += buf->size;
+       struct ep93xx_dma_data *data = filter_param;
 
-       if (!error) {
-               ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf);
-               tasklet_schedule(&rtd->period_tasklet);
-       } else {
-               snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+       if (data->direction == ep93xx_dma_chan_direction(chan)) {
+               chan->private = data;
+               return true;
        }
+
+       return false;
 }
 
 static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
@@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
        struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai;
        struct ep93xx_pcm_dma_params *dma_params;
        struct ep93xx_runtime_data *rtd;    
+       dma_cap_mask_t mask;
        int ret;
 
-       dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
+       ret = snd_pcm_hw_constraint_integer(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_PERIODS);
+       if (ret < 0)
+               return ret;
+
        snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware);
 
        rtd = kmalloc(sizeof(*rtd), GFP_KERNEL);
        if (!rtd) 
                return -ENOMEM;
 
-       memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet));
-       rtd->period_tasklet.func = ep93xx_pcm_period_elapsed;
-       rtd->period_tasklet.data = (unsigned long)substream;
-
-       rtd->cl.name = dma_params->name;
-       rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR |
-               ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
-                EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX);
-       rtd->cl.cookie = substream;
-       rtd->cl.buffer_started = ep93xx_pcm_buffer_started;
-       rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished;
-       ret = ep93xx_dma_m2p_client_register(&rtd->cl);
-       if (ret < 0) {
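+       /*
+        * Request a slave channel with cyclic support whose direction
+        * matches this stream.
+        */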
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_cap_set(DMA_CYCLIC, mask);
+
+       dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
+       rtd->dma_data.port = dma_params->dma_port;
+       rtd->dma_data.name = dma_params->name;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               rtd->dma_data.direction = DMA_TO_DEVICE;
+       else
+               rtd->dma_data.direction = DMA_FROM_DEVICE;
+
+       rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
+                                           &rtd->dma_data);
+       if (!rtd->dma_chan) {
                kfree(rtd);
-               return ret;
+               return -EINVAL;
        }
        
        substream->runtime->private_data = rtd;
@@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream)
 {
        struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
 
-       ep93xx_dma_m2p_client_unregister(&rtd->cl);
+       dma_release_channel(rtd->dma_chan);
        kfree(rtd);
        return 0;
 }
 
+static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct ep93xx_runtime_data *rtd = runtime->private_data;
+       struct dma_chan *chan = rtd->dma_chan;
+       struct dma_device *dma_dev = chan->device;
+       struct dma_async_tx_descriptor *desc;
+
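+       /*
+        * A single cyclic descriptor covers the whole ring buffer; the
+        * engine wraps automatically and fires the callback once per period.
+        */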
+       rtd->pointer_bytes = 0;
+       desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr,
+                                              rtd->period_bytes * rtd->periods,
+                                              rtd->period_bytes,
+                                              rtd->dma_data.direction);
+       if (!desc)
+               return -EINVAL;
+
+       desc->callback = ep93xx_pcm_dma_callback;
+       desc->callback_param = substream;
+
+       dmaengine_submit(desc);
+       return 0;
+}
+
+static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct ep93xx_runtime_data *rtd = runtime->private_data;
+
+       dmaengine_terminate_all(rtd->dma_chan);
+}
+
 static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct ep93xx_runtime_data *rtd = runtime->private_data;
-       size_t totsize = params_buffer_bytes(params);
-       size_t period = params_period_bytes(params);
-       int i;
 
        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-       runtime->dma_bytes = totsize;
-
-       rtd->periods = (totsize + period - 1) / period;
-       for (i = 0; i < rtd->periods; i++) {
-               rtd->buf[i].bus_addr = runtime->dma_addr + (i * period);
-               rtd->buf[i].size = period;
-               if ((i + 1) * period > totsize)
-                       rtd->buf[i].size = totsize - (i * period);
-       }
 
+       rtd->periods = params_periods(params);
+       rtd->period_bytes = params_period_bytes(params);
        return 0;
 }
 
@@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream)
 
 static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-       struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
        int ret;
-       int i;
 
        ret = 0;
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               rtd->pointer_bytes = 0;
-               for (i = 0; i < rtd->periods; i++)
-                       ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i);
+               ret = ep93xx_pcm_dma_submit(substream);
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ep93xx_dma_m2p_flush(&rtd->cl);
+               ep93xx_pcm_dma_flush(substream);
                break;
 
        default: