author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>   2011-04-21 03:20:16 -0400
committer  Chris Ball <cjb@laptop.org>                     2011-05-24 23:53:52 -0400
commit     df3ef2d3c92c0a562ebde3699af7d12401fddf60 (patch)
tree       4969fcb7e716bcedd8634ccfdf14e3d1f274abee /drivers/mmc
parent     faca6648e6f9659400bdafb985b50ba41f1a23b5 (diff)
mmc: protect the tmio_mmc driver against a theoretical race
The MMC subsystem does not guarantee that host driver .request() and
.set_ios() callbacks are serialised. Such concurrent calls, however,
do not have to be meaningfully supported; drivers just have to make
sure to avoid any severe problems.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
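
The pattern the patch adopts is worth spelling out: host->mrq doubles as a
busy flag, holding NULL when idle, a real request pointer while a command is
in flight, or an ERR_PTR() sentinel while .set_ios() owns the controller,
with every transition made under host->lock. Below is a minimal sketch of
that claim/release idea; example_host and example_set_ios() are hypothetical
stand-ins, not the driver's actual code.

#include <linux/err.h>
#include <linux/spinlock.h>

struct mmc_request;

/* hypothetical stand-in for struct tmio_mmc_host */
struct example_host {
	spinlock_t lock;
	/* NULL: idle; real pointer: request in flight; ERR_PTR(): .set_ios() active */
	struct mmc_request *mrq;
};

static void example_set_ios(struct example_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		/* a request or another .set_ios() already owns the host: back off */
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	/* claim the controller with a sentinel a racing .request() can detect */
	host->mrq = ERR_PTR(-EBUSY);
	spin_unlock_irqrestore(&host->lock, flags);

	/* ... reprogram clock and power, unlocked ... */

	host->mrq = NULL;	/* release: ready for new calls */
}

A racing .request() that finds IS_ERR(host->mrq) under the same lock can then
bounce the request with -EAGAIN instead of corrupting state, which is what
the tmio_mmc_request() hunk below does.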
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/tmio_mmc.h     |  1
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c | 62
2 files changed, 57 insertions(+), 6 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c1470e6b138d..58138a203877 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/mmc/tmio.h>
 #include <linux/pagemap.h>
+#include <linux/spinlock.h>
 
 /* Definitions for values the CTRL_SDIO_STATUS register can take. */
 #define TMIO_SDIO_STAT_IOIRQ	0x0001
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 8b05d3ceb29c..ea6ade31bcc7 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -244,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 	spin_lock_irqsave(&host->lock, flags);
 	mrq = host->mrq;
 
-	/* request already finished */
-	if (!mrq
+	/*
+	 * is request already finished? Since we use a non-blocking
+	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
+	 * us, so, have to check for IS_ERR(host->mrq)
+	 */
+	if (IS_ERR_OR_NULL(mrq)
 	    || time_is_after_jiffies(host->last_req_ts +
 		msecs_to_jiffies(2000))) {
 		spin_unlock_irqrestore(&host->lock, flags);
@@ -265,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 
 	host->cmd = NULL;
 	host->data = NULL;
-	host->mrq = NULL;
 	host->force_pio = false;
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	tmio_mmc_reset(host);
 
+	/* Ready for new calls */
+	host->mrq = NULL;
+
 	mmc_request_done(host->mmc, mrq);
 }
 
+/* called with host->lock held, interrupts disabled */
 static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
@@ -282,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 	if (!mrq)
 		return;
 
-	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 	host->force_pio = false;
 
 	cancel_delayed_work(&host->delayed_reset_work);
 
+	host->mrq = NULL;
+
+	/* FIXME: mmc_request_done() can schedule! */
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -686,15 +695,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
+	unsigned long flags;
 	int ret;
 
-	if (host->mrq)
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->mrq) {
 		pr_debug("request not null\n");
+		if (IS_ERR(host->mrq)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			mrq->cmd->error = -EAGAIN;
+			mmc_request_done(mmc, mrq);
+			return;
+		}
+	}
 
 	host->last_req_ts = jiffies;
 	wmb();
 	host->mrq = mrq;
 
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	if (mrq->data) {
 		ret = tmio_mmc_start_data(host, mrq->data);
 		if (ret)
@@ -709,8 +730,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 fail:
-	host->mrq = NULL;
 	host->force_pio = false;
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -724,6 +745,29 @@ fail:
 static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mrq) {
+		if (IS_ERR(host->mrq)) {
+			dev_dbg(&host->pdev->dev,
+				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
+				current->comm, task_pid_nr(current),
+				ios->clock, ios->power_mode);
+			host->mrq = ERR_PTR(-EINTR);
+		} else {
+			dev_dbg(&host->pdev->dev,
+				"%s.%d: CMD%u active since %lu, now %lu!\n",
+				current->comm, task_pid_nr(current),
+				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	host->mrq = ERR_PTR(-EBUSY);
+
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	if (ios->clock)
 		tmio_mmc_set_clock(host, ios->clock);
@@ -754,6 +798,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	/* Let things settle. delay taken from winCE driver */
 	udelay(140);
+	if (PTR_ERR(host->mrq) == -EINTR)
+		dev_dbg(&host->pdev->dev,
+			"%s.%d: IOS interrupted: clk %u, mode %u",
+			current->comm, task_pid_nr(current),
+			ios->clock, ios->power_mode);
+	host->mrq = NULL;
 }
 
 static int tmio_mmc_get_ro(struct mmc_host *mmc)
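
One subtlety of the sentinel scheme: "no request in flight" now covers both
NULL and error-pointer values of host->mrq, which is why the reset worker
above tests IS_ERR_OR_NULL(mrq) instead of !mrq. A small illustrative helper
follows, assuming the same 2-second watchdog; example_request_timed_out() is
a hypothetical name, and the real check is written inline (and inverted) in
tmio_mmc_reset_work().

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct mmc_request;

static bool example_request_timed_out(struct mmc_request *mrq,
				      unsigned long last_req_ts)
{
	/* idle, or .set_ios() owns the host: nothing to time out */
	if (IS_ERR_OR_NULL(mrq))
		return false;
	/* a real request older than 2 s is considered stuck */
	return time_is_before_jiffies(last_req_ts + msecs_to_jiffies(2000));
}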