diff options
-rw-r--r-- | drivers/mmc/host/dw_mmc.c | 107 |
1 file changed, 94 insertions, 13 deletions
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 860313bd952a..4f2806720c5c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -401,16 +401,37 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) | |||
401 | static inline void dw_mci_set_cto(struct dw_mci *host) | 401 | static inline void dw_mci_set_cto(struct dw_mci *host) |
402 | { | 402 | { |
403 | unsigned int cto_clks; | 403 | unsigned int cto_clks; |
404 | unsigned int cto_div; | ||
404 | unsigned int cto_ms; | 405 | unsigned int cto_ms; |
406 | unsigned long irqflags; | ||
405 | 407 | ||
406 | cto_clks = mci_readl(host, TMOUT) & 0xff; | 408 | cto_clks = mci_readl(host, TMOUT) & 0xff; |
407 | cto_ms = DIV_ROUND_UP(cto_clks, host->bus_hz / 1000); | 409 | cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; |
410 | if (cto_div == 0) | ||
411 | cto_div = 1; | ||
412 | cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); | ||
408 | 413 | ||
409 | /* add a bit spare time */ | 414 | /* add a bit spare time */ |
410 | cto_ms += 10; | 415 | cto_ms += 10; |
411 | 416 | ||
412 | mod_timer(&host->cto_timer, | 417 | /* |
413 | jiffies + msecs_to_jiffies(cto_ms) + 1); | 418 | * The durations we're working with are fairly short so we have to be |
419 | * extra careful about synchronization here. Specifically in hardware a | ||
420 | * command timeout is _at most_ 5.1 ms, so that means we expect an | ||
421 | * interrupt (either command done or timeout) to come rather quickly | ||
422 | * after the mci_writel. ...but just in case we have a long interrupt | ||
423 | * latency let's add a bit of paranoia. | ||
424 | * | ||
425 | * In general we'll assume that at least an interrupt will be asserted | ||
426 | * in hardware by the time the cto_timer runs. ...and if it hasn't | ||
427 | * been asserted in hardware by that time then we'll assume it'll never | ||
428 | * come. | ||
429 | */ | ||
430 | spin_lock_irqsave(&host->irq_lock, irqflags); | ||
431 | if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) | ||
432 | mod_timer(&host->cto_timer, | ||
433 | jiffies + msecs_to_jiffies(cto_ms) + 1); | ||
434 | spin_unlock_irqrestore(&host->irq_lock, irqflags); | ||
414 | } | 435 | } |
415 | 436 | ||
416 | static void dw_mci_start_command(struct dw_mci *host, | 437 | static void dw_mci_start_command(struct dw_mci *host, |
@@ -425,11 +446,11 @@ static void dw_mci_start_command(struct dw_mci *host, | |||
425 | wmb(); /* drain writebuffer */ | 446 | wmb(); /* drain writebuffer */ |
426 | dw_mci_wait_while_busy(host, cmd_flags); | 447 | dw_mci_wait_while_busy(host, cmd_flags); |
427 | 448 | ||
449 | mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); | ||
450 | |||
428 | /* response expected command only */ | 451 | /* response expected command only */ |
429 | if (cmd_flags & SDMMC_CMD_RESP_EXP) | 452 | if (cmd_flags & SDMMC_CMD_RESP_EXP) |
430 | dw_mci_set_cto(host); | 453 | dw_mci_set_cto(host); |
431 | |||
432 | mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); | ||
433 | } | 454 | } |
434 | 455 | ||
435 | static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) | 456 | static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) |
@@ -1915,10 +1936,15 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) | |||
1915 | static void dw_mci_set_drto(struct dw_mci *host) | 1936 | static void dw_mci_set_drto(struct dw_mci *host) |
1916 | { | 1937 | { |
1917 | unsigned int drto_clks; | 1938 | unsigned int drto_clks; |
1939 | unsigned int drto_div; | ||
1918 | unsigned int drto_ms; | 1940 | unsigned int drto_ms; |
1919 | 1941 | ||
1920 | drto_clks = mci_readl(host, TMOUT) >> 8; | 1942 | drto_clks = mci_readl(host, TMOUT) >> 8; |
1921 | drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000); | 1943 | drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; |
1944 | if (drto_div == 0) | ||
1945 | drto_div = 1; | ||
1946 | drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, | ||
1947 | host->bus_hz); | ||
1922 | 1948 | ||
1923 | /* add a bit spare time */ | 1949 | /* add a bit spare time */ |
1924 | drto_ms += 10; | 1950 | drto_ms += 10; |
@@ -1926,6 +1952,24 @@ static void dw_mci_set_drto(struct dw_mci *host) | |||
1926 | mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); | 1952 | mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); |
1927 | } | 1953 | } |
1928 | 1954 | ||
1955 | static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) | ||
1956 | { | ||
1957 | if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) | ||
1958 | return false; | ||
1959 | |||
1960 | /* | ||
1961 | * Really be certain that the timer has stopped. This is a bit of | ||
1962 | * paranoia and could only really happen if we had really bad | ||
1963 | * interrupt latency and the interrupt routine and timeout were | ||
1964 | * running concurrently so that the del_timer() in the interrupt | ||
1965 | * handler couldn't run. | ||
1966 | */ | ||
1967 | WARN_ON(del_timer_sync(&host->cto_timer)); | ||
1968 | clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); | ||
1969 | |||
1970 | return true; | ||
1971 | } | ||
1972 | |||
1929 | static void dw_mci_tasklet_func(unsigned long priv) | 1973 | static void dw_mci_tasklet_func(unsigned long priv) |
1930 | { | 1974 | { |
1931 | struct dw_mci *host = (struct dw_mci *)priv; | 1975 | struct dw_mci *host = (struct dw_mci *)priv; |
@@ -1952,8 +1996,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
1952 | 1996 | ||
1953 | case STATE_SENDING_CMD11: | 1997 | case STATE_SENDING_CMD11: |
1954 | case STATE_SENDING_CMD: | 1998 | case STATE_SENDING_CMD: |
1955 | if (!test_and_clear_bit(EVENT_CMD_COMPLETE, | 1999 | if (!dw_mci_clear_pending_cmd_complete(host)) |
1956 | &host->pending_events)) | ||
1957 | break; | 2000 | break; |
1958 | 2001 | ||
1959 | cmd = host->cmd; | 2002 | cmd = host->cmd; |
@@ -2122,8 +2165,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
2122 | /* fall through */ | 2165 | /* fall through */ |
2123 | 2166 | ||
2124 | case STATE_SENDING_STOP: | 2167 | case STATE_SENDING_STOP: |
2125 | if (!test_and_clear_bit(EVENT_CMD_COMPLETE, | 2168 | if (!dw_mci_clear_pending_cmd_complete(host)) |
2126 | &host->pending_events)) | ||
2127 | break; | 2169 | break; |
2128 | 2170 | ||
2129 | /* CMD error in data command */ | 2171 | /* CMD error in data command */ |
@@ -2570,6 +2612,8 @@ done: | |||
2570 | 2612 | ||
2571 | static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) | 2613 | static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) |
2572 | { | 2614 | { |
2615 | del_timer(&host->cto_timer); | ||
2616 | |||
2573 | if (!host->cmd_status) | 2617 | if (!host->cmd_status) |
2574 | host->cmd_status = status; | 2618 | host->cmd_status = status; |
2575 | 2619 | ||
@@ -2594,6 +2638,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2594 | struct dw_mci *host = dev_id; | 2638 | struct dw_mci *host = dev_id; |
2595 | u32 pending; | 2639 | u32 pending; |
2596 | struct dw_mci_slot *slot = host->slot; | 2640 | struct dw_mci_slot *slot = host->slot; |
2641 | unsigned long irqflags; | ||
2597 | 2642 | ||
2598 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ | 2643 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
2599 | 2644 | ||
@@ -2601,8 +2646,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2601 | /* Check volt switch first, since it can look like an error */ | 2646 | /* Check volt switch first, since it can look like an error */ |
2602 | if ((host->state == STATE_SENDING_CMD11) && | 2647 | if ((host->state == STATE_SENDING_CMD11) && |
2603 | (pending & SDMMC_INT_VOLT_SWITCH)) { | 2648 | (pending & SDMMC_INT_VOLT_SWITCH)) { |
2604 | unsigned long irqflags; | ||
2605 | |||
2606 | mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); | 2649 | mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); |
2607 | pending &= ~SDMMC_INT_VOLT_SWITCH; | 2650 | pending &= ~SDMMC_INT_VOLT_SWITCH; |
2608 | 2651 | ||
@@ -2618,11 +2661,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2618 | } | 2661 | } |
2619 | 2662 | ||
2620 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { | 2663 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { |
2664 | spin_lock_irqsave(&host->irq_lock, irqflags); | ||
2665 | |||
2621 | del_timer(&host->cto_timer); | 2666 | del_timer(&host->cto_timer); |
2622 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); | 2667 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); |
2623 | host->cmd_status = pending; | 2668 | host->cmd_status = pending; |
2624 | smp_wmb(); /* drain writebuffer */ | 2669 | smp_wmb(); /* drain writebuffer */ |
2625 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); | 2670 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); |
2671 | |||
2672 | spin_unlock_irqrestore(&host->irq_lock, irqflags); | ||
2626 | } | 2673 | } |
2627 | 2674 | ||
2628 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { | 2675 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { |
@@ -2662,9 +2709,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2662 | } | 2709 | } |
2663 | 2710 | ||
2664 | if (pending & SDMMC_INT_CMD_DONE) { | 2711 | if (pending & SDMMC_INT_CMD_DONE) { |
2665 | del_timer(&host->cto_timer); | 2712 | spin_lock_irqsave(&host->irq_lock, irqflags); |
2713 | |||
2666 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); | 2714 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); |
2667 | dw_mci_cmd_interrupt(host, pending); | 2715 | dw_mci_cmd_interrupt(host, pending); |
2716 | |||
2717 | spin_unlock_irqrestore(&host->irq_lock, irqflags); | ||
2668 | } | 2718 | } |
2669 | 2719 | ||
2670 | if (pending & SDMMC_INT_CD) { | 2720 | if (pending & SDMMC_INT_CD) { |
@@ -2938,7 +2988,35 @@ static void dw_mci_cmd11_timer(unsigned long arg) | |||
2938 | static void dw_mci_cto_timer(unsigned long arg) | 2988 | static void dw_mci_cto_timer(unsigned long arg) |
2939 | { | 2989 | { |
2940 | struct dw_mci *host = (struct dw_mci *)arg; | 2990 | struct dw_mci *host = (struct dw_mci *)arg; |
2991 | unsigned long irqflags; | ||
2992 | u32 pending; | ||
2941 | 2993 | ||
2994 | spin_lock_irqsave(&host->irq_lock, irqflags); | ||
2995 | |||
2996 | /* | ||
2997 | * If somehow we have very bad interrupt latency it's remotely possible | ||
2998 | * that the timer could fire while the interrupt is still pending or | ||
2999 | * while the interrupt is midway through running. Let's be paranoid | ||
3000 | * and detect those two cases. Note that this paranoia is somewhat | ||
3001 | * justified because in this function we don't actually cancel the | ||
3002 | * pending command in the controller--we just assume it will never come. | ||
3003 | */ | ||
3004 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ | ||
3005 | if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { | ||
3006 | /* The interrupt should fire; no need to act but we can warn */ | ||
3007 | dev_warn(host->dev, "Unexpected interrupt latency\n"); | ||
3008 | goto exit; | ||
3009 | } | ||
3010 | if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { | ||
3011 | /* Presumably interrupt handler couldn't delete the timer */ | ||
3012 | dev_warn(host->dev, "CTO timeout when already completed\n"); | ||
3013 | goto exit; | ||
3014 | } | ||
3015 | |||
3016 | /* | ||
3017 | * Continued paranoia to make sure we're in the state we expect. | ||
3018 | * This paranoia isn't really justified but it seems good to be safe. | ||
3019 | */ | ||
2942 | switch (host->state) { | 3020 | switch (host->state) { |
2943 | case STATE_SENDING_CMD11: | 3021 | case STATE_SENDING_CMD11: |
2944 | case STATE_SENDING_CMD: | 3022 | case STATE_SENDING_CMD: |
@@ -2957,6 +3035,9 @@ static void dw_mci_cto_timer(unsigned long arg) | |||
2957 | host->state); | 3035 | host->state); |
2958 | break; | 3036 | break; |
2959 | } | 3037 | } |
3038 | |||
3039 | exit: | ||
3040 | spin_unlock_irqrestore(&host->irq_lock, irqflags); | ||
2960 | } | 3041 | } |
2961 | 3042 | ||
2962 | static void dw_mci_dto_timer(unsigned long arg) | 3043 | static void dw_mci_dto_timer(unsigned long arg) |