author	Arnd Hannemann <arnd@arndnet.de>	2011-01-05 17:36:14 -0500
committer	Chris Ball <cjb@laptop.org>	2011-01-08 23:52:31 -0500
commit	6ff56e0d8e02df023440ea65774cf1d15e669ece (patch)
tree	527ca2a5b6ed1e8f2bd0d3407f464663d3e15590 /drivers/mmc
parent	23b66071e8ce7f359a0e410a8a3514bd3179e92e (diff)
mmc: tmio_mmc: handle missing HW interrupts
When doing excessive hotplug, e.g., repeated insert/eject operations, the
hardware may get confused to a point where no CMDTIMEOUT/CMDRESPEND
interrupts are generated any more. As a result requests get stuck, e.g.:

[ 360.351562] INFO: task kworker/u:0:5 blocked for more than 120 seconds.
[ 360.351562] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 360.359375] kworker/u:0   D c020c2b4     0     5      2 0x00000000
[ 360.367187] Backtrace:
[ 360.367187] [<c020bfb0>] (schedule+0x0/0x340) from [<c020c480>] (schedule_timeout+0x20/0x190)
[ 360.375000]  r8:c702fd70 r7:00000002 r6:c702e000 r5:c702fdc4 r4:7fffffff
[ 360.375000]  r3:c701e040
[ 360.382812] [<c020c460>] (schedule_timeout+0x0/0x190) from [<c020be78>] (wait_for_common+0xc4/0x150)
[ 360.390625]  r6:c702e000 r5:c702fdc4 r4:7fffffff
[ 360.390625] [<c020bdb4>] (wait_for_common+0x0/0x150) from [<c020bfac>] (wait_for_completion+0x18/0x1c)
[ 360.398437] [<c020bf94>] (wait_for_completion+0x0/0x1c) from [<c0185590>] (mmc_wait_for_req+0x214/0x234)
[ 360.406250] [<c018537c>] (mmc_wait_for_req+0x0/0x234) from [<c01889d0>] (mmc_sd_switch+0xfc/0x114)
[ 360.414062]  r7:c702fe4c r6:c702fe20 r5:c7179800 r4:00fffff0
[ 360.421875] [<c01888d4>] (mmc_sd_switch+0x0/0x114) from [<c0187f70>] (mmc_sd_setup_card+0x260/0x384)
[ 360.429687] [<c0187d10>] (mmc_sd_setup_card+0x0/0x384) from [<c01885e0>] (mmc_sd_init_card+0x13c/0x1e0)
[ 360.437500] [<c01884a4>] (mmc_sd_init_card+0x0/0x1e0) from [<c01887a8>] (mmc_attach_sd+0x124/0x1a8)
[ 360.445312]  r8:c02db404 r7:ffffff92 r6:c702ff34 r5:c6007da8 r4:c6007c00
[ 360.453125] [<c0188684>] (mmc_attach_sd+0x0/0x1a8) from [<c0185140>] (mmc_rescan+0x248/0x2f0)
[ 360.460937]  r5:c6007da8 r4:c6007c00
[ 360.468750] [<c0184ef8>] (mmc_rescan+0x0/0x2f0) from [<c00467f0>] (process_one_work+0x1ec/0x318)
[ 360.476562]  r7:c6007da8 r6:00000000 r5:c710ec00 r4:c701bde0
[ 360.484375] [<c0046604>] (process_one_work+0x0/0x318) from [<c0047fb0>] (worker_thread+0x1b0/0x2cc)
[ 360.492187] [<c0047e00>] (worker_thread+0x0/0x2cc) from [<c004b338>] (kthread+0x8c/0x94)
[ 360.500000] [<c004b2ac>] (kthread+0x0/0x94) from [<c0037fc4>] (do_exit+0x0/0x590)
[ 360.507812]  r7:00000013 r6:c0037fc4 r5:c004b2ac r4:c7021f00

This patch addresses this problem by introducing timeouts for outstanding
interrupts. If a hardware interrupt is missing, a soft reset will be
performed to bring the hardware back to a working state.

Tested with the SDHI hardware block in sh7372 / AP4EVB.

Signed-off-by: Arnd Hannemann <arnd@arndnet.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
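The mechanism is the familiar delayed-work watchdog pattern: arm a delayed work item when a request is issued, cancel it when the completion interrupt finishes the request, and soft-reset the controller if the work ever runs. The sketch below illustrates just that pattern with the same 2-second timeout; the example_* names are illustrative placeholders, not identifiers from this driver (the real fields and handlers appear in the diff that follows).

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_host {
	struct delayed_work delayed_reset_work;	/* lost-interrupt watchdog */
};

static void example_reset_work(struct work_struct *work)
{
	struct example_host *host = container_of(work, struct example_host,
						 delayed_reset_work.work);

	/* Only reached when no completion interrupt arrived in time:
	 * soft-reset the controller and fail the stuck request here. */
	(void)host;
}

static void example_probe(struct example_host *host)
{
	/* Bind the watchdog handler once, at probe time. */
	INIT_DELAYED_WORK(&host->delayed_reset_work, example_reset_work);
}

static void example_issue_request(struct example_host *host)
{
	/* Arm the watchdog whenever a command is started... */
	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(2000));
}

static void example_finish_request(struct example_host *host)
{
	/* ...and disarm it when the hardware interrupt completes it. */
	cancel_delayed_work(&host->delayed_reset_work);
}

Because the watchdog can race with a late interrupt, the patch also takes host->lock in the interrupt paths and re-checks last_req_ts before declaring a request dead, as the diff below shows.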
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/tmio_mmc.c	90
1 files changed, 86 insertions, 4 deletions
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 81bed310ddcd..689a3692242e 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -39,6 +39,8 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #define CTL_SD_CMD 0x00
 #define CTL_ARG_REG 0x04
@@ -154,6 +156,11 @@ struct tmio_mmc_host {
 	u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
 	struct scatterlist bounce_sg;
 #endif
+
+	/* Track lost interrupts */
+	struct delayed_work delayed_reset_work;
+	spinlock_t lock;
+	unsigned long last_req_ts;
 };
 
 static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
@@ -345,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
 	msleep(10);
 }
 
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  delayed_reset_work.work);
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	mrq = host->mrq;
+
+	/* request already finished */
+	if (!mrq
+	    || time_is_after_jiffies(host->last_req_ts +
+		msecs_to_jiffies(2000))) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	dev_warn(&host->pdev->dev,
+		"timeout waiting for hardware interrupt (CMD%u)\n",
+		mrq->cmd->opcode);
+
+	if (host->data)
+		host->data->error = -ETIMEDOUT;
+	else if (host->cmd)
+		host->cmd->error = -ETIMEDOUT;
+	else
+		mrq->cmd->error = -ETIMEDOUT;
+
+	host->cmd = NULL;
+	host->data = NULL;
+	host->mrq = NULL;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	reset(host);
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static void
 tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
 
+	if (!mrq)
+		return;
+
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 
+	cancel_delayed_work(&host->delayed_reset_work);
+
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -463,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	return;
 }
 
+/* needs to be called with host->lock held */
 static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
@@ -519,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 {
-	struct mmc_data *data = host->data;
+	struct mmc_data *data;
+	spin_lock(&host->lock);
+	data = host->data;
 
 	if (!data)
-		return;
+		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
 		/*
@@ -543,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	} else {
 		tmio_mmc_do_data_irq(host);
 	}
+out:
+	spin_unlock(&host->lock);
 }
 
 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -551,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
 
+	spin_lock(&host->lock);
+
 	if (!host->cmd) {
 		pr_debug("Spurious CMD irq\n");
-		return;
+		goto out;
 	}
 
 	host->cmd = NULL;
@@ -598,6 +657,9 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		tmio_mmc_finish_request(host);
 	}
 
+out:
+	spin_unlock(&host->lock);
+
 	return;
 }
 
@@ -906,6 +968,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
 		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -915,6 +983,8 @@ static void tmio_tasklet_fn(unsigned long arg)
 			DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /* It might be necessary to make filter MFD specific */
@@ -1037,6 +1107,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (host->mrq)
 		pr_debug("request not null\n");
 
+	host->last_req_ts = jiffies;
+	wmb();
 	host->mrq = mrq;
 
 	if (mrq->data) {
@@ -1046,10 +1118,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret)
+	if (!ret) {
+		schedule_delayed_work(&host->delayed_reset_work,
+				      msecs_to_jiffies(2000));
 		return;
+	}
 
 fail:
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -1247,6 +1323,11 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (ret)
 		goto cell_disable;
 
+	spin_lock_init(&host->lock);
+
+	/* Init delayed work for request timeouts */
+	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(host, pdata);
 
@@ -1285,6 +1366,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		cancel_delayed_work_sync(&host->delayed_reset_work);
 		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)