diff options
author | David S. Miller <davem@davemloft.net> | 2012-04-10 14:30:45 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-04-10 14:30:45 -0400 |
commit | 06eb4eafbdc0796d741d139a44f1253278da8611 (patch) | |
tree | fbdb44317130c371928154c9e6903e699fe2b995 /drivers/mmc/host/davinci_mmc.c | |
parent | 32ed53b83ea5ec26a4dba90e18f5e0ff6c71eb48 (diff) | |
parent | f68e556e23d1a4176b563bcb25d8baf2c5313f91 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/mmc/host/davinci_mmc.c')
-rw-r--r-- | drivers/mmc/host/davinci_mmc.c | 66 |
1 file changed, 55 insertions, 11 deletions
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 64a8325a4a8a..c1f3673ae1ef 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c | |||
@@ -160,6 +160,16 @@ module_param(rw_threshold, uint, S_IRUGO); | |||
160 | MODULE_PARM_DESC(rw_threshold, | 160 | MODULE_PARM_DESC(rw_threshold, |
161 | "Read/Write threshold. Default = 32"); | 161 | "Read/Write threshold. Default = 32"); |
162 | 162 | ||
163 | static unsigned poll_threshold = 128; | ||
164 | module_param(poll_threshold, uint, S_IRUGO); | ||
165 | MODULE_PARM_DESC(poll_threshold, | ||
166 | "Polling transaction size threshold. Default = 128"); | ||
167 | |||
168 | static unsigned poll_loopcount = 32; | ||
169 | module_param(poll_loopcount, uint, S_IRUGO); | ||
170 | MODULE_PARM_DESC(poll_loopcount, | ||
171 | "Maximum polling loop count. Default = 32"); | ||
172 | |||
163 | static unsigned __initdata use_dma = 1; | 173 | static unsigned __initdata use_dma = 1; |
164 | module_param(use_dma, uint, 0); | 174 | module_param(use_dma, uint, 0); |
165 | MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); | 175 | MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); |
@@ -193,6 +203,7 @@ struct mmc_davinci_host { | |||
193 | bool use_dma; | 203 | bool use_dma; |
194 | bool do_dma; | 204 | bool do_dma; |
195 | bool sdio_int; | 205 | bool sdio_int; |
206 | bool active_request; | ||
196 | 207 | ||
197 | /* Scatterlist DMA uses one or more parameter RAM entries: | 208 | /* Scatterlist DMA uses one or more parameter RAM entries: |
198 | * the main one (associated with rxdma or txdma) plus zero or | 209 | * the main one (associated with rxdma or txdma) plus zero or |
@@ -219,6 +230,7 @@ struct mmc_davinci_host { | |||
219 | #endif | 230 | #endif |
220 | }; | 231 | }; |
221 | 232 | ||
233 | static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); | ||
222 | 234 | ||
223 | /* PIO only */ | 235 | /* PIO only */ |
224 | static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) | 236 | static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) |
@@ -376,7 +388,20 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, | |||
376 | 388 | ||
377 | writel(cmd->arg, host->base + DAVINCI_MMCARGHL); | 389 | writel(cmd->arg, host->base + DAVINCI_MMCARGHL); |
378 | writel(cmd_reg, host->base + DAVINCI_MMCCMD); | 390 | writel(cmd_reg, host->base + DAVINCI_MMCCMD); |
379 | writel(im_val, host->base + DAVINCI_MMCIM); | 391 | |
392 | host->active_request = true; | ||
393 | |||
394 | if (!host->do_dma && host->bytes_left <= poll_threshold) { | ||
395 | u32 count = poll_loopcount; | ||
396 | |||
397 | while (host->active_request && count--) { | ||
398 | mmc_davinci_irq(0, host); | ||
399 | cpu_relax(); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | if (host->active_request) | ||
404 | writel(im_val, host->base + DAVINCI_MMCIM); | ||
380 | } | 405 | } |
381 | 406 | ||
382 | /*----------------------------------------------------------------------*/ | 407 | /*----------------------------------------------------------------------*/ |
@@ -915,6 +940,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) | |||
915 | if (!data->stop || (host->cmd && host->cmd->error)) { | 940 | if (!data->stop || (host->cmd && host->cmd->error)) { |
916 | mmc_request_done(host->mmc, data->mrq); | 941 | mmc_request_done(host->mmc, data->mrq); |
917 | writel(0, host->base + DAVINCI_MMCIM); | 942 | writel(0, host->base + DAVINCI_MMCIM); |
943 | host->active_request = false; | ||
918 | } else | 944 | } else |
919 | mmc_davinci_start_command(host, data->stop); | 945 | mmc_davinci_start_command(host, data->stop); |
920 | } | 946 | } |
@@ -942,6 +968,7 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, | |||
942 | cmd->mrq->cmd->retries = 0; | 968 | cmd->mrq->cmd->retries = 0; |
943 | mmc_request_done(host->mmc, cmd->mrq); | 969 | mmc_request_done(host->mmc, cmd->mrq); |
944 | writel(0, host->base + DAVINCI_MMCIM); | 970 | writel(0, host->base + DAVINCI_MMCIM); |
971 | host->active_request = false; | ||
945 | } | 972 | } |
946 | } | 973 | } |
947 | 974 | ||
@@ -1009,12 +1036,33 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) | |||
1009 | * by read. So, it is not unbouned loop even in the case of | 1036 | * by read. So, it is not unbouned loop even in the case of |
1010 | * non-dma. | 1037 | * non-dma. |
1011 | */ | 1038 | */ |
1012 | while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { | 1039 | if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { |
1013 | davinci_fifo_data_trans(host, rw_threshold); | 1040 | unsigned long im_val; |
1014 | status = readl(host->base + DAVINCI_MMCST0); | 1041 | |
1015 | if (!status) | 1042 | /* |
1016 | break; | 1043 | * If interrupts fire during the following loop, they will be |
1017 | qstatus |= status; | 1044 | * handled by the handler, but the PIC will still buffer these. |
1045 | * As a result, the handler will be called again to serve these | ||
1046 | * needlessly. In order to avoid these spurious interrupts, | ||
1047 | * keep interrupts masked during the loop. | ||
1048 | */ | ||
1049 | im_val = readl(host->base + DAVINCI_MMCIM); | ||
1050 | writel(0, host->base + DAVINCI_MMCIM); | ||
1051 | |||
1052 | do { | ||
1053 | davinci_fifo_data_trans(host, rw_threshold); | ||
1054 | status = readl(host->base + DAVINCI_MMCST0); | ||
1055 | qstatus |= status; | ||
1056 | } while (host->bytes_left && | ||
1057 | (status & (MMCST0_DXRDY | MMCST0_DRRDY))); | ||
1058 | |||
1059 | /* | ||
1060 | * If an interrupt is pending, it is assumed it will fire when | ||
1061 | * it is unmasked. This assumption is also taken when the MMCIM | ||
1062 | * is first set. Otherwise, writing to MMCIM after reading the | ||
1063 | * status is race-prone. | ||
1064 | */ | ||
1065 | writel(im_val, host->base + DAVINCI_MMCIM); | ||
1018 | } | 1066 | } |
1019 | 1067 | ||
1020 | if (qstatus & MMCST0_DATDNE) { | 1068 | if (qstatus & MMCST0_DATDNE) { |
@@ -1418,17 +1466,14 @@ static int davinci_mmcsd_suspend(struct device *dev) | |||
1418 | struct mmc_davinci_host *host = platform_get_drvdata(pdev); | 1466 | struct mmc_davinci_host *host = platform_get_drvdata(pdev); |
1419 | int ret; | 1467 | int ret; |
1420 | 1468 | ||
1421 | mmc_host_enable(host->mmc); | ||
1422 | ret = mmc_suspend_host(host->mmc); | 1469 | ret = mmc_suspend_host(host->mmc); |
1423 | if (!ret) { | 1470 | if (!ret) { |
1424 | writel(0, host->base + DAVINCI_MMCIM); | 1471 | writel(0, host->base + DAVINCI_MMCIM); |
1425 | mmc_davinci_reset_ctrl(host, 1); | 1472 | mmc_davinci_reset_ctrl(host, 1); |
1426 | mmc_host_disable(host->mmc); | ||
1427 | clk_disable(host->clk); | 1473 | clk_disable(host->clk); |
1428 | host->suspended = 1; | 1474 | host->suspended = 1; |
1429 | } else { | 1475 | } else { |
1430 | host->suspended = 0; | 1476 | host->suspended = 0; |
1431 | mmc_host_disable(host->mmc); | ||
1432 | } | 1477 | } |
1433 | 1478 | ||
1434 | return ret; | 1479 | return ret; |
@@ -1444,7 +1489,6 @@ static int davinci_mmcsd_resume(struct device *dev) | |||
1444 | return 0; | 1489 | return 0; |
1445 | 1490 | ||
1446 | clk_enable(host->clk); | 1491 | clk_enable(host->clk); |
1447 | mmc_host_enable(host->mmc); | ||
1448 | 1492 | ||
1449 | mmc_davinci_reset_ctrl(host, 0); | 1493 | mmc_davinci_reset_ctrl(host, 0); |
1450 | ret = mmc_resume_host(host->mmc); | 1494 | ret = mmc_resume_host(host->mmc); |