| author | Adrian Hunter <adrian.hunter@nokia.com> | 2009-09-22 19:44:58 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-23 10:39:35 -0400 |
| commit | 4dffd7a251172d78a157ff45ec1207012f44774a (patch) | |
| tree | 35db86950a7c43ba4b69dc64220c57d10e15f693 /drivers/mmc | |
| parent | 23050103c21d4d5314b7c978187e6e4305a00495 (diff) | |
omap_hsmmc: prevent races with irq handler
If an unexpected interrupt arrives while the next request is being prepared, an oops can occur.
For example, a new request is setting up DMA for a data transfer, so host->data is not NULL. An unexpected transfer complete (TC) interrupt comes along and the interrupt handler sets host->data to NULL. Oops!
Prevent this by serializing the request path and the interrupt handler with a spinlock.
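The race and the fix can be illustrated with a minimal sketch (kernel-style C; names like example_host and example_irq are illustrative, not the actual omap_hsmmc code): the request path takes the lock with spin_lock_irqsave() before touching host->data, and the interrupt handler takes the same lock with spin_lock() before clearing it, so the two can no longer interleave. In the real patch the request-path unlock happens later, in mmc_omap_start_command(), and both lock and unlock are skipped when the request is issued from interrupt context (the STOP command path).

```c
/*
 * Minimal sketch of the locking pattern (illustrative names, not the
 * actual omap_hsmmc code): both the request path and the IRQ handler
 * touch host->data, so both must hold the same spinlock.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mmc/core.h>

struct example_host {
	spinlock_t	irq_lock;	/* serializes request setup vs. IRQ */
	unsigned long	flags;		/* saved IRQ state for irqsave */
	struct mmc_data	*data;		/* state shared with the IRQ handler */
};

static void example_request(struct example_host *host, struct mmc_data *data)
{
	/* Block the IRQ handler while the new request is being set up. */
	spin_lock_irqsave(&host->irq_lock, host->flags);
	host->data = data;	/* the IRQ handler cannot run concurrently */
	spin_unlock_irqrestore(&host->irq_lock, host->flags);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_host *host = dev_id;

	/* Hard-IRQ context: a plain spin_lock() pairs with irqsave above. */
	spin_lock(&host->irq_lock);
	host->data = NULL;	/* an unexpected TC can no longer race */
	spin_unlock(&host->irq_lock);

	return IRQ_HANDLED;
}
```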
Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Acked-by: Matt Fleming <matt@console-pimps.org>
Cc: Ian Molton <ian@mnementh.co.uk>
Cc: "Roberto A. Foglietta" <roberto.foglietta@gmail.com>
Cc: Jarkko Lavinen <jarkko.lavinen@nokia.com>
Cc: Denis Karpov <ext-denis.2.karpov@nokia.com>
Cc: Pierre Ossman <pierre@ossman.eu>
Cc: Philip Langdale <philipl@overt.org>
Cc: "Madhusudhan" <madhu.cr@ti.com>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r-- | drivers/mmc/host/omap_hsmmc.c | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f9ed5e23f145..0bfea9c2a597 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -148,6 +148,8 @@ struct mmc_omap_host {
 	struct work_struct	mmc_carddetect_work;
 	void	__iomem		*base;
 	resource_size_t		mapbase;
+	spinlock_t		irq_lock; /* Prevent races with irq handler */
+	unsigned long		flags;
 	unsigned int		id;
 	unsigned int		dma_len;
 	unsigned int		dma_sg_idx;
@@ -459,6 +461,14 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
 	if (host->use_dma)
 		cmdreg |= DMA_EN;
 
+	/*
+	 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
+	 * by the interrupt handler, otherwise (i.e. for a new request) it is
+	 * unlocked here.
+	 */
+	if (!in_interrupt())
+		spin_unlock_irqrestore(&host->irq_lock, host->flags);
+
 	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
 	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
 }
@@ -621,11 +631,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
 	struct mmc_data *data;
 	int end_cmd = 0, end_trans = 0, status;
 
+	spin_lock(&host->irq_lock);
+
 	if (host->mrq == NULL) {
 		OMAP_HSMMC_WRITE(host->base, STAT,
 			OMAP_HSMMC_READ(host->base, STAT));
 		/* Flush posted write */
 		OMAP_HSMMC_READ(host->base, STAT);
+		spin_unlock(&host->irq_lock);
 		return IRQ_HANDLED;
 	}
 
@@ -690,6 +703,8 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
 	if ((end_trans || (status & TC)) && host->mrq)
 		mmc_omap_xfer_done(host, data);
 
+	spin_unlock(&host->irq_lock);
+
 	return IRQ_HANDLED;
 }
 
@@ -1018,6 +1033,13 @@ static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 	struct mmc_omap_host *host = mmc_priv(mmc);
 	int err;
 
+	/*
+	 * Prevent races with the interrupt handler because of unexpected
+	 * interrupts, but not if we are already in interrupt context i.e.
+	 * retries.
+	 */
+	if (!in_interrupt())
+		spin_lock_irqsave(&host->irq_lock, host->flags);
 	WARN_ON(host->mrq != NULL);
 	host->mrq = req;
 	err = mmc_omap_prepare_data(host, req);
@@ -1026,6 +1048,8 @@ static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 		if (req->data)
 			req->data->error = err;
 		host->mrq = NULL;
+		if (!in_interrupt())
+			spin_unlock_irqrestore(&host->irq_lock, host->flags);
 		mmc_request_done(mmc, req);
 		return;
 	}
@@ -1580,6 +1604,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
 	mmc->f_max = 52000000;
 
 	sema_init(&host->sem, 1);
+	spin_lock_init(&host->irq_lock);
 
 	host->iclk = clk_get(&pdev->dev, "ick");
 	if (IS_ERR(host->iclk)) {