Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 90
1 file changed, 86 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 81bed310ddcd..689a3692242e 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -39,6 +39,8 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #define CTL_SD_CMD 0x00
 #define CTL_ARG_REG 0x04
@@ -154,6 +156,11 @@ struct tmio_mmc_host {
 	u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
 	struct scatterlist bounce_sg;
 #endif
+
+	/* Track lost interrupts */
+	struct delayed_work delayed_reset_work;
+	spinlock_t lock;
+	unsigned long last_req_ts;
 };
 
 static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
@@ -345,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
 	msleep(10);
 }
 
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  delayed_reset_work.work);
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	mrq = host->mrq;
+
+	/* request already finished */
+	if (!mrq
+	    || time_is_after_jiffies(host->last_req_ts +
+		msecs_to_jiffies(2000))) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	dev_warn(&host->pdev->dev,
+		"timeout waiting for hardware interrupt (CMD%u)\n",
+		mrq->cmd->opcode);
+
+	if (host->data)
+		host->data->error = -ETIMEDOUT;
+	else if (host->cmd)
+		host->cmd->error = -ETIMEDOUT;
+	else
+		mrq->cmd->error = -ETIMEDOUT;
+
+	host->cmd = NULL;
+	host->data = NULL;
+	host->mrq = NULL;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	reset(host);
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static void
 tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
 
+	if (!mrq)
+		return;
+
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 
+	cancel_delayed_work(&host->delayed_reset_work);
+
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -463,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	return;
 }
 
+/* needs to be called with host->lock held */
 static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
@@ -519,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 {
-	struct mmc_data *data = host->data;
+	struct mmc_data *data;
+	spin_lock(&host->lock);
+	data = host->data;
 
 	if (!data)
-		return;
+		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
 		/*
@@ -543,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	} else {
 		tmio_mmc_do_data_irq(host);
 	}
+out:
+	spin_unlock(&host->lock);
 }
 
 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -551,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
 
+	spin_lock(&host->lock);
+
 	if (!host->cmd) {
 		pr_debug("Spurious CMD irq\n");
-		return;
+		goto out;
 	}
 
 	host->cmd = NULL;
@@ -598,6 +657,9 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		tmio_mmc_finish_request(host);
 	}
 
+out:
+	spin_unlock(&host->lock);
+
 	return;
 }
 
@@ -906,6 +968,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
 		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -915,6 +983,8 @@ static void tmio_tasklet_fn(unsigned long arg)
 			DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /* It might be necessary to make filter MFD specific */
@@ -1037,6 +1107,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (host->mrq)
 		pr_debug("request not null\n");
 
+	host->last_req_ts = jiffies;
+	wmb();
 	host->mrq = mrq;
 
 	if (mrq->data) {
@@ -1046,10 +1118,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret)
+	if (!ret) {
+		schedule_delayed_work(&host->delayed_reset_work,
+				      msecs_to_jiffies(2000));
 		return;
+	}
 
 fail:
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -1247,6 +1323,11 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (ret)
 		goto cell_disable;
 
+	spin_lock_init(&host->lock);
+
+	/* Init delayed work for request timeouts */
+	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(host, pdata);
 
@@ -1285,6 +1366,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		cancel_delayed_work_sync(&host->delayed_reset_work);
 		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
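
The hunks above add a software watchdog for requests whose completion interrupt never arrives: the issue path records a timestamp and schedules delayed work, the normal completion path cancels it, and if the work ever runs it marks the request -ETIMEDOUT, resets the controller, and completes the request. The following is a minimal standalone sketch of that pattern, not the tmio_mmc code itself; all names here (my_host, my_start_request, my_finish_request, my_reset_work, MY_TIMEOUT_MS, and the commented-out my_hw_reset/my_complete_request) are hypothetical.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define MY_TIMEOUT_MS	2000

struct my_host {
	struct delayed_work delayed_reset_work;
	spinlock_t lock;
	unsigned long last_req_ts;
	void *active_req;	/* in-flight request, NULL when idle */
};

/* Issue path: remember when the request started and arm the watchdog. */
static void my_start_request(struct my_host *host, void *req)
{
	host->last_req_ts = jiffies;
	host->active_req = req;
	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(MY_TIMEOUT_MS));
}

/* Normal completion path: disarm the watchdog. */
static void my_finish_request(struct my_host *host)
{
	host->active_req = NULL;
	cancel_delayed_work(&host->delayed_reset_work);
}

/* Runs only if the completion interrupt was lost. */
static void my_reset_work(struct work_struct *work)
{
	struct my_host *host = container_of(work, struct my_host,
					    delayed_reset_work.work);
	unsigned long flags;
	void *req;

	spin_lock_irqsave(&host->lock, flags);
	req = host->active_req;

	/* Nothing to do if the request finished or was restarted recently. */
	if (!req || time_is_after_jiffies(host->last_req_ts +
					  msecs_to_jiffies(MY_TIMEOUT_MS))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->active_req = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	/* Hypothetical stand-ins for the driver's reset() and its
	 * "set ->error = -ETIMEDOUT, then mmc_request_done()" completion: */
	/* my_hw_reset(host); */
	/* my_complete_request(host, req); */
}

/* Probe-time setup, mirroring the spin_lock_init()/INIT_DELAYED_WORK() hunk. */
static void my_host_init(struct my_host *host)
{
	spin_lock_init(&host->lock);
	INIT_DELAYED_WORK(&host->delayed_reset_work, my_reset_work);
}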