author	Douglas Anderson <dianders@chromium.org>	2017-10-12 16:11:18 -0400
committer	Ulf Hansson <ulf.hansson@linaro.org>	2017-11-02 10:20:41 -0400
commit	93c23ae385299f889606b42507b12b40e50d6088 (patch)
tree	ab9222986faed046208bb655bcabd8abd1a1f54d
parent	eaaffcefaa8015aaf496590707ddfa70905e0001 (diff)
mmc: dw_mmc: Cleanup the DTO timer like the CTO one
The recent CTO timer introduced in commit 03de19212ea3 ("mmc: dw_mmc:
introduce timer for broken command transfer over scheme") was causing
observable problems due to race conditions.  Previous patches have fixed
those race conditions.

It can be observed that these same race conditions ought to be
theoretically possible with the DTO timer too, though they are massively
less likely to happen because the data timeout is always set to 0xffffff
right now.  That means that even at a 200 MHz card clock we were arming
the DTO timer for 94 ms:

  >>> (0xffffff * 1000. / 200000000) + 10
  93.886075

We also were always setting the DTO timer _after_ starting the transfer,
unlike how the old code was setting the CTO timer.

In any case, even though the DTO timer is much less likely to have races,
it still makes sense to add code to handle it _just in case_.

Signed-off-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
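[Editorial note: the same back-of-the-envelope arithmetic as the Python
snippet above, written as a small standalone C program.  This is a
hypothetical illustration, not driver code; it assumes the worst-case
0xffffff data timeout, the 200 MHz card clock quoted in the message, and
the 10 ms of spare time the driver adds, rounding the conversion up.]

#include <stdio.h>

int main(void)
{
	unsigned long long drto_clks = 0xffffff;   /* worst-case data timeout (TMOUT >> 8) */
	unsigned long long bus_hz = 200000000;     /* 200 MHz card clock from the example */
	unsigned long long drto_ms;

	/* card-clock cycles -> milliseconds, rounded up, plus 10 ms of spare time */
	drto_ms = (drto_clks * 1000 + bus_hz - 1) / bus_hz + 10;

	printf("DTO timer armed for %llu ms\n", drto_ms);   /* prints 94 */
	return 0;
}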
 drivers/mmc/host/dw_mmc.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index c365742d6b7f..37b55b095daf 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host)
 	unsigned int drto_clks;
 	unsigned int drto_div;
 	unsigned int drto_ms;
+	unsigned long irqflags;
 
 	drto_clks = mci_readl(host, TMOUT) >> 8;
 	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
@@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host)
 	/* add a bit spare time */
 	drto_ms += 10;
 
-	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+	spin_lock_irqsave(&host->irq_lock, irqflags);
+	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+		mod_timer(&host->dto_timer,
+			  jiffies + msecs_to_jiffies(drto_ms));
+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 }
 
 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
@@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
 	return true;
 }
 
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+		return false;
+
+	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+	WARN_ON(del_timer_sync(&host->dto_timer));
+	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+	return true;
+}
+
 static void dw_mci_tasklet_func(unsigned long priv)
 {
 	struct dw_mci *host = (struct dw_mci *)priv;
@@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
 			/* fall through */
 
 		case STATE_DATA_BUSY:
-			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
-						&host->pending_events)) {
+			if (!dw_mci_clear_pending_data_complete(host)) {
 				/*
 				 * If data error interrupt comes but data over
 				 * interrupt doesn't come within the given time.
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 		}
 
 		if (pending & SDMMC_INT_DATA_OVER) {
+			spin_lock_irqsave(&host->irq_lock, irqflags);
+
 			del_timer(&host->dto_timer);
 
 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 			}
 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
 			tasklet_schedule(&host->tasklet);
+
+			spin_unlock_irqrestore(&host->irq_lock, irqflags);
 		}
 
 		if (pending & SDMMC_INT_RXDR) {
@@ -3043,7 +3063,31 @@ exit:
 static void dw_mci_dto_timer(unsigned long arg)
 {
 	struct dw_mci *host = (struct dw_mci *)arg;
+	unsigned long irqflags;
+	u32 pending;
+
+	spin_lock_irqsave(&host->irq_lock, irqflags);
 
+	/*
+	 * The DTO timer is much longer than the CTO timer, so it's even less
+	 * likely that we'll see these cases, but it pays to be paranoid.
+	 */
+	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+	if (pending & SDMMC_INT_DATA_OVER) {
+		/* The interrupt should fire; no need to act but we can warn */
+		dev_warn(host->dev, "Unexpected data interrupt latency\n");
+		goto exit;
+	}
+	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+		/* Presumably interrupt handler couldn't delete the timer */
+		dev_warn(host->dev, "DTO timeout when already completed\n");
+		goto exit;
+	}
+
+	/*
+	 * Continued paranoia to make sure we're in the state we expect.
+	 * This paranoia isn't really justified but it seems good to be safe.
+	 */
 	switch (host->state) {
 	case STATE_SENDING_DATA:
 	case STATE_DATA_BUSY:
@@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
 		tasklet_schedule(&host->tasklet);
 		break;
 	default:
+		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+			 host->state);
 		break;
 	}
+
+exit:
+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 }
 
 #ifdef CONFIG_OF
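[Editorial note: taken together, the hunks above make every place that
touches the DTO timer and EVENT_DATA_COMPLETE do so under host->irq_lock,
so the timeout path can notice that completion already happened and back
off.  Below is a minimal userspace sketch of that pattern, with
hypothetical names: a pthread mutex stands in for the spinlock, and plain
booleans stand in for the timer and the pending bit.  It is an analogue
for illustration only, not the kernel code.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* One lock guards both the "timer" and the completion flag, as in the patch. */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool data_complete;    /* stands in for EVENT_DATA_COMPLETE */
static bool dto_timer_armed;  /* stands in for host->dto_timer being pending */

/* Analogue of dw_mci_set_drto(): only arm the timer if data isn't done yet. */
static void set_drto(void)
{
	pthread_mutex_lock(&irq_lock);
	if (!data_complete)
		dto_timer_armed = true;
	pthread_mutex_unlock(&irq_lock);
}

/* Analogue of the SDMMC_INT_DATA_OVER branch of the interrupt handler. */
static void data_over_irq(void)
{
	pthread_mutex_lock(&irq_lock);
	dto_timer_armed = false;  /* del_timer(&host->dto_timer) */
	data_complete = true;     /* set_bit(EVENT_DATA_COMPLETE, ...) */
	pthread_mutex_unlock(&irq_lock);
}

/* Analogue of dw_mci_dto_timer(): back off if completion already happened. */
static void dto_timer_expired(void)
{
	pthread_mutex_lock(&irq_lock);
	if (data_complete) {
		puts("DTO timeout when already completed");
		goto out;
	}
	puts("genuine data timeout: flag the error and kick the tasklet");
out:
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	set_drto();
	data_over_irq();
	dto_timer_expired();  /* the completion check wins; only a warning */
	return 0;
}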