#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
int suspended;
int irq;
int use_dma, dma_ch;
+ int dma_ch_tx, dma_ch_rx;
int dma_line_tx, dma_line_rx;
int slot_id;
int got_dbclk;
irq_mask &= ~DTO_ENABLE;
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
- OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+ OMAP_HSMMC_WRITE(host->base, ISE, host->use_dma ? irq_mask : 0);
OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
}
static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
+/*
+ * for hosts with 35xx erratum 2.1.1.128
+ *
+ * sysfs 'unsafe_read' show: report the currently selected multi-block
+ * read workaround mode (set via omap_hsmmc_set_unsafe_read):
+ *   0 - multi-block reads disabled (MMC_CAP2_NO_MULTI_READ set)
+ *   1 - multi-block reads allowed, clock capped below OMAP_MMC_MAX_CLOCK
+ *   2 - multi-block reads allowed at full OMAP_MMC_MAX_CLOCK
+ */
+static ssize_t
+omap_hsmmc_show_unsafe_read(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+	int val = 0;
+
+	if (!(mmc->caps2 & MMC_CAP2_NO_MULTI_READ)) {
+		val = 1;
+		/* distinguish mode 2 (full clock) from mode 1 (capped clock) */
+		if (mmc->f_max == OMAP_MMC_MAX_CLOCK)
+			val = 2;
+	}
+
+	return sprintf(buf, "%d\n", val);
+}
+
+/*
+ * sysfs 'unsafe_read' store: select the workaround mode for 35xx
+ * erratum 2.1.1.128 (broken multi-block reads):
+ *   0 - safe: disable multi-block reads, keep full clock (slow reads)
+ *   1 - unsafe: allow multi-block reads, cap clock at 32 MHz
+ *   2 - unsafe: allow multi-block reads at full OMAP_MMC_MAX_CLOCK
+ * Returns count on success, -EINVAL on a non-numeric or out-of-range
+ * value (previously values > 2 were silently accepted as a no-op).
+ */
+static ssize_t
+omap_hsmmc_set_unsafe_read(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
+		mmc->f_max = OMAP_MMC_MAX_CLOCK;
+		break;
+	case 1:
+		mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+		mmc->f_max = 32000000;
+		break;
+	case 2:
+		mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+		mmc->f_max = OMAP_MMC_MAX_CLOCK;
+		break;
+	default:
+		/* reject unknown modes instead of silently ignoring them */
+		return -EINVAL;
+	}
+
+	return count;
+}
+static DEVICE_ATTR(unsafe_read, S_IWUSR | S_IRUGO,
+		omap_hsmmc_show_unsafe_read, omap_hsmmc_set_unsafe_read);
+
/*
* Configure the response type and send the cmd.
*/
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(dma_ch);
host->data->host_cookie = 0;
}
host->data = NULL;
struct mmc_data *data;
int end_cmd = 0, end_trans = 0;
- if (!host->req_in_progress) {
- do {
- OMAP_HSMMC_WRITE(host->base, STAT, status);
- /* Flush posted write */
- status = OMAP_HSMMC_READ(host->base, STAT);
- } while (status & INT_EN_MASK);
+ if (unlikely(!host->req_in_progress)) {
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
return;
}
data = host->data;
dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
- if (status & ERR) {
+ if (unlikely(status & ERR)) {
omap_hsmmc_dbg_report_irq(host, status);
if ((status & CMD_TIMEOUT) ||
(status & CMD_CRC)) {
}
/*
- * Work Item to notify the core about card insertion/removal
+ * irq handler to notify the core about card insertion/removal
*/
-static void omap_hsmmc_detect(struct work_struct *work)
+static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
{
- struct omap_hsmmc_host *host =
- container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
+ struct omap_hsmmc_host *host = dev_id;
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;
if (host->suspended)
- return;
+ return IRQ_HANDLED;
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
mmc_detect_change(host->mmc, (HZ * 200) / 1000);
else
mmc_detect_change(host->mmc, (HZ * 50) / 1000);
-}
-
-/*
- * ISR for handling card insertion and removal
- */
-static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
-
- if (host->suspended)
- return IRQ_HANDLED;
- schedule_work(&host->mmc_carddetect_work);
-
return IRQ_HANDLED;
}
return sync_dev;
}
+/*
+ * Program the DMA channel settings that depend only on transfer
+ * direction, not on the individual request: the constant-address FIFO
+ * port (the controller DATA register), 16-word bursts and data packing.
+ * Called once when a channel is first allocated for a direction; the
+ * per-request memory-side address is still set for every request in
+ * omap_hsmmc_config_dma_params().
+ */
+static void omap_hsmmc_config_dma_params_once(struct omap_hsmmc_host *host,
+					struct mmc_data *data,
+					int dma_ch)
+{
+	if (data->flags & MMC_DATA_WRITE) {
+		/* memory -> card: DATA register is the fixed DMA destination */
+		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);
+		omap_set_dma_src_data_pack(dma_ch, 1);
+	} else {
+		/* card -> memory: DATA register is the fixed DMA source */
+		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);
+		omap_set_dma_dest_data_pack(dma_ch, 1);
+		omap_set_dma_write_mode(dma_ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+	}
+}
+
static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
struct mmc_data *data,
struct scatterlist *sgl)
{
- int blksz, nblk, dma_ch;
+ int blksz, nblk, dma_ch, sync;
dma_ch = host->dma_ch;
if (data->flags & MMC_DATA_WRITE) {
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
sg_dma_address(sgl), 0, 0);
+ sync = OMAP_DMA_DST_SYNC_PREFETCH;
} else {
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
sg_dma_address(sgl), 0, 0);
+ sync = OMAP_DMA_SRC_SYNC;
}
blksz = host->data->blksz;
omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- omap_hsmmc_get_dma_sync_dev(host, data),
- !(data->flags & MMC_DATA_WRITE));
+ omap_hsmmc_get_dma_sync_dev(host, data), sync);
omap_start_dma(dma_ch);
}
{
struct omap_hsmmc_host *host = cb_data;
struct mmc_data *data;
- int dma_ch, req_in_progress;
+ int req_in_progress;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
- dma_ch = host->dma_ch;
host->dma_ch = -1;
spin_unlock(&host->irq_lock);
- omap_free_dma(dma_ch);
-
/* If DMA has finished after TC, complete the request */
if (!req_in_progress) {
struct mmc_request *mrq = host->mrq;
{
int dma_len;
- if (!next && data->host_cookie &&
- data->host_cookie != host->next_data.cookie) {
+ if (unlikely(!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie)) {
pr_warning("[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
}
- if (dma_len == 0)
+ if (unlikely(dma_len == 0))
return -EINVAL;
if (next) {
struct scatterlist *sgl;
sgl = data->sg + i;
- if (sgl->length % data->blksz)
+ if (unlikely(sgl->length % data->blksz))
return -EINVAL;
}
- if ((data->blksz % 4) != 0)
+ if (unlikely((data->blksz % 4) != 0))
/* REVISIT: The MMC buffer increments only when MSB is written.
* Return error for blksz which is non multiple of four.
*/
BUG_ON(host->dma_ch != -1);
- ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
- "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
- if (ret != 0) {
- dev_err(mmc_dev(host->mmc),
- "%s: omap_request_dma() failed with %d\n",
- mmc_hostname(host->mmc), ret);
- return ret;
+ if (data->flags & MMC_DATA_WRITE)
+ dma_ch = host->dma_ch_tx;
+ else
+ dma_ch = host->dma_ch_rx;
+
+ if (dma_ch == -1) {
+ ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+ "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
+ if (unlikely(ret != 0)) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: omap_request_dma() failed with %d\n",
+ mmc_hostname(host->mmc), ret);
+ return ret;
+ }
+
+ omap_hsmmc_config_dma_params_once(host, data, dma_ch);
+
+ if (data->flags & MMC_DATA_WRITE)
+ host->dma_ch_tx = dma_ch;
+ else
+ host->dma_ch_rx = dma_ch;
}
+
ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
- if (ret)
+ if (unlikely(ret))
return ret;
host->dma_ch = dma_ch;
return 0;
}
-static void set_data_timeout(struct omap_hsmmc_host *host,
-		unsigned int timeout_ns,
-		unsigned int timeout_clks)
+/*
+ * pandora wifi small transfer hack
+ *
+ * Decide whether this request should go through DMA.  Returns 0 (use
+ * PIO) for a single-segment transfer of at most 16 bytes, where DMA
+ * setup overhead dominates the copy; returns 1 (use DMA) otherwise.
+ * Callers assign the result directly to host->use_dma.
+ */
+static int check_mmc3_dma_hack(struct omap_hsmmc_host *host,
+		struct mmc_request *req)
{
-	unsigned int timeout, cycle_ns;
-	uint32_t reg, clkd, dto = 0;
-
-	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
-	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
-	if (clkd == 0)
-		clkd = 1;
-
-	cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
-	timeout = timeout_ns / cycle_ns;
-	timeout += timeout_clks;
-	if (timeout) {
-		while ((timeout & 0x80000000) == 0) {
-			dto += 1;
-			timeout <<= 1;
-		}
-		dto = 31 - dto;
-		timeout <<= 1;
-		if (timeout && dto)
-			dto += 1;
-		if (dto >= 13)
-			dto -= 13;
-		else
-			dto = 0;
-		if (dto > 14)
-			dto = 14;
-	}
-
-	reg &= ~DTO_MASK;
-	reg |= dto << DTO_SHIFT;
-	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
+	/* small single-sg request: fall back to polled I/O */
+	if (req->data != NULL && req->data->sg_len == 1
+	    && req->data->sg->length <= 16)
+		return 0;
+	else
+		return 1;
}
/*
if (req->data == NULL) {
OMAP_HSMMC_WRITE(host->base, BLK, 0);
- /*
- * Set an arbitrary 100ms data timeout for commands with
- * busy signal.
- */
- if (req->cmd->flags & MMC_RSP_BUSY)
- set_data_timeout(host, 100000000U, 0);
return 0;
}
OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
| (req->data->blocks << 16));
- set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
if (host->use_dma) {
ret = omap_hsmmc_start_dma_transfer(host, req);
bool is_first_req)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int use_dma = host->use_dma;
if (mrq->data->host_cookie) {
mrq->data->host_cookie = 0;
return ;
}
- if (host->use_dma)
+ if (host->id == OMAP_MMC3_DEVID)
+ use_dma = check_mmc3_dma_hack(host, mrq);
+ if (use_dma)
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
&host->next_data))
mrq->data->host_cookie = 0;
}
+/* STAT register bits: buffer write ready / buffer read ready */
+#define BWR (1 << 4)
+#define BRR (1 << 5)
+
+/*
+ * Polled (PIO) data transfer path, used when check_mmc3_dma_hack()
+ * turned DMA off for a small request.  Busy-waits on STAT, moving one
+ * 32-bit word at a time through the DATA FIFO, and completes the
+ * request via the normal cmd_done/xfer_done paths.
+ * NOTE(review): assumes a single scatterlist segment whose length is a
+ * multiple of 4 - true for the <= 16 byte transfers this is selected
+ * for, but not checked here.
+ */
+static noinline void omap_hsmmc_request_do_pio(struct mmc_host *mmc,
+		struct mmc_request *req)
+{
+	struct omap_hsmmc_host *host = mmc_priv(mmc);
+	u32 *data = sg_virt(req->data->sg);
+	u32 len = req->data->sg->length;
+	int stat;
+	int i;
+
+	/* arbitrary iteration cap standing in for a real timeout */
+	for (i = 0; i < 10000000; i++) {
+		stat = OMAP_HSMMC_READ(host->base, STAT);
+		if (stat == 0)
+			continue;
+
+		//dev_err(mmc_dev(host->mmc), "stat %x, l %d\n", stat, i);
+
+		/* reset the data FSM on data errors before reporting them */
+		if (stat & (DATA_TIMEOUT | DATA_CRC))
+			omap_hsmmc_reset_controller_fsm(host, SRD);
+
+		if (stat & ERR) {
+			req->cmd->error =
+				req->data->error = -EINVAL; /* NOTE(review): -EINVAL is a guess; CRC would usually be -EILSEQ - confirm */
+			omap_hsmmc_xfer_done(host, host->data);
+			return;
+		}
+
+		if (req->data->flags & MMC_DATA_WRITE) {
+			/* FIFO has room: push words while BWR stays set */
+			while (len > 0 && (stat & BWR)) {
+				OMAP_HSMMC_WRITE(host->base, DATA, *data++);
+				len -= 4;
+			}
+		} else {
+			/* FIFO has data: drain words while BRR stays set */
+			while (len > 0 && (stat & BRR)) {
+				*data++ = OMAP_HSMMC_READ(host->base, DATA);
+				len -= 4;
+			}
+		}
+
+		/* command finished: complete it through the normal path */
+		if ((stat & CC) && host->cmd)
+			omap_hsmmc_cmd_done(host, host->cmd);
+		/* transfer complete: finish the data phase and stop polling */
+		if ((stat & TC) && host->mrq) {
+			omap_hsmmc_xfer_done(host, host->data);
+			break;
+		}
+	}
+
+	/* poll budget expired with data left over: report a timeout */
+	if (len > 0) {
+		req->cmd->error =
+			req->data->error = -ETIMEDOUT;
+		omap_hsmmc_xfer_done(host, req->data);
+	}
+}
+
/*
* Request function. for read/write operation
*/
BUG_ON(host->req_in_progress);
BUG_ON(host->dma_ch != -1);
- if (host->protect_card) {
+ if (unlikely(host->protect_card)) {
if (host->reqs_blocked < 3) {
/*
* Ensure the controller is left in a consistent
return;
} else if (host->reqs_blocked)
host->reqs_blocked = 0;
+
+ /* pandora wifi hack... */
+ if (host->id == OMAP_MMC3_DEVID)
+ host->use_dma = check_mmc3_dma_hack(host, req);
+
WARN_ON(host->mrq != NULL);
host->mrq = req;
err = omap_hsmmc_prepare_data(host, req);
- if (err) {
+ if (unlikely(err)) {
req->cmd->error = err;
if (req->data)
req->data->error = err;
}
omap_hsmmc_start_command(host, req->cmd, req->data);
+
+ if (host->use_dma == 0)
+ omap_hsmmc_request_do_pio(mmc, req);
}
/* Routine to configure clock values. Exposed API to core */
host->use_dma = 1;
host->dev->dma_mask = &pdata->dma_mask;
host->dma_ch = -1;
+ host->dma_ch_tx = -1;
+ host->dma_ch_rx = -1;
host->irq = irq;
host->id = pdev->id;
host->slot_id = 0;
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
- INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
mmc->ops = &omap_hsmmc_ops;
omap_hsmmc_context_save(host);
mmc->caps |= MMC_CAP_DISABLE;
+
if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
- ret = request_irq(mmc_slot(host).card_detect_irq,
- omap_hsmmc_cd_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- mmc_hostname(mmc), host);
+ ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
+ NULL,
+ omap_hsmmc_detect,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
"Unable to grab MMC CD IRQ\n");
goto err_slot_name;
}
+ if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
+ ret = device_create_file(&mmc->class_dev, &dev_attr_unsafe_read);
+
+ /* MMC_CAP2_NO_MULTI_READ makes it crawl, try a different workaround */
+ mmc->caps2 &= ~MMC_CAP2_NO_MULTI_READ;
+ mmc->max_segs = 1;
+ mmc->f_max = 32000000;
+ }
+
omap_hsmmc_debugfs(mmc);
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
free_irq(host->irq, host);
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
- flush_work_sync(&host->mmc_carddetect_work);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
return ret;
}
}
- cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
if (ret == 0) {
} else {
host->suspended = 0;
if (host->pdata->resume) {
- ret = host->pdata->resume(&pdev->dev,
- host->slot_id);
- if (ret)
+ if (host->pdata->resume(&pdev->dev, host->slot_id))
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
+ int dma_ch;
host = platform_get_drvdata(to_platform_device(dev));
omap_hsmmc_context_save(host);
+
+ dma_ch = xchg(&host->dma_ch_tx, -1);
+ if (dma_ch != -1)
+ omap_free_dma(dma_ch);
+
+ dma_ch = xchg(&host->dma_ch_rx, -1);
+ if (dma_ch != -1)
+ omap_free_dma(dma_ch);
+
dev_dbg(mmc_dev(host->mmc), "disabled\n");
return 0;