author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>   2011-07-14 06:12:38 -0400
committer  Chris Ball <cjb@laptop.org>                     2011-07-21 10:35:05 -0400
commit     b9269fdd4f61aa4d185c982b0f84a3e7b7ccb4d2 (patch)
tree       73cd02b34f61e71d6a4ceff5c09b78a9ba17eadb /drivers/mmc
parent     15bed0f2fa8e1d7db201692532c210a7823d2d21 (diff)
mmc: tmio: fix recursive spinlock, don't schedule with interrupts disabled
Calling mmc_request_done() under a spinlock with interrupts disabled
leads to a recursive spin-lock on the request retry path and to
scheduling in atomic context. This patch fixes both of these problems
by moving mmc_request_done() to the scheduler workqueue.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
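The essence of the fix is the usual interrupt-to-workqueue deferral pattern: the interrupt handler only queues a work item, and the work item completes the request from process context, where taking the host spinlock and sleeping are both legal. Below is a minimal stand-alone sketch of that pattern; the names my_host, my_done_work, my_irq_handler and my_probe_init are invented for illustration and are not part of the tmio driver.

/* Illustrative sketch only -- not part of the patch.  It mirrors the
 * structure the patch introduces: a work_struct in the host private
 * data, completion done from the work handler, and the IRQ handler
 * reduced to schedule_work(). */
#include <linux/interrupt.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_host {
	spinlock_t		lock;	/* protects mrq */
	struct work_struct	done;	/* request completion, process context */
	struct mmc_request	*mrq;
	struct mmc_host		*mmc;
};

/* Runs on the scheduler workqueue: it may take the spinlock itself and
 * may sleep, so calling mmc_request_done() here is safe. */
static void my_done_work(struct work_struct *work)
{
	struct my_host *host = container_of(work, struct my_host, done);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}

/* The interrupt handler never completes the request directly; it only
 * queues the work item and returns. */
static irqreturn_t my_irq_handler(int irq, void *devid)
{
	struct my_host *host = devid;

	schedule_work(&host->done);
	return IRQ_HANDLED;
}

/* Wired up once at probe time, analogous to the INIT_WORK() call the
 * patch adds to tmio_mmc_host_probe(). */
static void my_probe_init(struct my_host *host)
{
	spin_lock_init(&host->lock);
	INIT_WORK(&host->done, my_done_work);
}

The matching teardown step is visible in the patch itself: tmio_mmc_host_remove() gains a cancel_work_sync(&host->done) so that a queued completion cannot run against a host that is being removed.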
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/tmio_mmc.h      6
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c  35
2 files changed, 34 insertions(+), 7 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 211ef6e7a82..f0d7c434979 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -18,6 +18,7 @@
 
 #include <linux/highmem.h>
 #include <linux/mmc/tmio.h>
+#include <linux/mutex.h>
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 
@@ -73,8 +74,11 @@ struct tmio_mmc_host {
 
 	/* Track lost interrupts */
 	struct delayed_work	delayed_reset_work;
-	spinlock_t		lock;
+	struct work_struct	done;
+
+	spinlock_t		lock;		/* protect host private data */
 	unsigned long		last_req_ts;
+	struct mutex		ios_lock;	/* protect set_ios() context */
 };
 
 int tmio_mmc_host_probe(struct tmio_mmc_host **host,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index f7dd3b1bf8c..a2f76adbfd3 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -250,10 +250,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 /* called with host->lock held, interrupts disabled */
 static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
-	struct mmc_request *mrq = host->mrq;
+	struct mmc_request *mrq;
+	unsigned long flags;
 
-	if (!mrq)
+	spin_lock_irqsave(&host->lock, flags);
+
+	mrq = host->mrq;
+	if (IS_ERR_OR_NULL(mrq)) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
+	}
 
 	host->cmd = NULL;
 	host->data = NULL;
@@ -262,11 +268,18 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 	cancel_delayed_work(&host->delayed_reset_work);
 
 	host->mrq = NULL;
+	spin_unlock_irqrestore(&host->lock, flags);
 
-	/* FIXME: mmc_request_done() can schedule! */
 	mmc_request_done(host->mmc, mrq);
 }
 
+static void tmio_mmc_done_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  done);
+	tmio_mmc_finish_request(host);
+}
+
 /* These are the bitmasks the tmio chip requires to implement the MMC response
  * types.  Note that R1 and R6 are the same in this scheme. */
 #define APP_CMD        0x0040
@@ -433,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 		BUG();
 	}
 
-	tmio_mmc_finish_request(host);
+	schedule_work(&host->done);
 }
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
@@ -523,7 +536,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 			tasklet_schedule(&host->dma_issue);
 		}
 	} else {
-		tmio_mmc_finish_request(host);
+		schedule_work(&host->done);
 	}
 
 out:
@@ -573,7 +586,8 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
 	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
 			TMIO_STAT_CARD_REMOVE);
-		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
+		if (!work_pending(&host->mmc->detect.work))
+			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
 		goto out;
 	}
 
@@ -703,6 +717,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct tmio_mmc_data *pdata = host->pdata;
 	unsigned long flags;
 
+	mutex_lock(&host->ios_lock);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (host->mrq) {
 		if (IS_ERR(host->mrq)) {
@@ -718,6 +734,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
 		}
 		spin_unlock_irqrestore(&host->lock, flags);
+
+		mutex_unlock(&host->ios_lock);
 		return;
 	}
 
@@ -771,6 +789,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			current->comm, task_pid_nr(current),
 			ios->clock, ios->power_mode);
 	host->mrq = NULL;
+
+	mutex_unlock(&host->ios_lock);
 }
 
 static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -867,9 +887,11 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
 	tmio_mmc_enable_sdio_irq(mmc, 0);
 
 	spin_lock_init(&_host->lock);
+	mutex_init(&_host->ios_lock);
 
 	/* Init delayed work for request timeouts */
 	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
+	INIT_WORK(&_host->done, tmio_mmc_done_work);
 
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(_host, pdata);
@@ -917,6 +939,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
+	cancel_work_sync(&host->done);
 	cancel_delayed_work_sync(&host->delayed_reset_work);
 	tmio_mmc_release_dma(host);
 