author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 13:17:35 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 13:17:35 -0500
commit     dee02770cdcd8bc06a48c917ce5df2fb56cf6059 (patch)
tree       c79799cc851a224a02c007ff5122e12992bde7ab /drivers/mmc/core/core.c
parent     e4a8ca3baa5557fa54557d42b5910ed0d3316922 (diff)
parent     06641e8deae68ee2769c734158bc9170be257bb9 (diff)
Merge tag 'mmc-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC updates from Ulf Hansson:
"MMC core:
- Introduce host claiming by context to support blkmq
- Preparations for enabling CQE (eMMC CMDQ) requests
- Re-factorizations to prepare for blkmq support
- Re-factorizations to prepare for CQE support
- Fix signal voltage switch for SD cards without power cycle
- Convert RPMB to a character device
- Export eMMC revision via sysfs
- Support eMMC DT binding for fixed driver type
- Document mmc_regulator_get_supply() API
MMC host:
- omap_hsmmc: Updated regulator management for PBIAS
- sdhci-omap: Add new OMAP SDHCI driver
- meson-mx-sdio: New driver for the Amlogic Meson8 and Meson8b SoCs
- sdhci-pci: Add support for Intel CDF
- sdhci-acpi: Fix voltage switch for some Intel host controllers
- sdhci-msm: Enable delay circuit calibration clocks
- sdhci-msm: Manage power IRQ properly
- mediatek: Add support of mt2701/mt2712
- mediatek: Updates management of clocks and tunings
- mediatek: Upgrade eMMC HS400 support
- rtsx_pci: Update tuning for gen3 PCI-Express
- renesas_sdhi: Support R-Car Gen[123] fallback compatibility strings
- Catch all errors when getting regulators
- Various additional improvements and cleanups"
* tag 'mmc-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (91 commits)
sdhci-fujitsu: add support for setting the CMD_DAT_DELAY attribute
dt-bindings: sdhci-fujitsu: document cmd-dat-delay property
mmc: tmio: Replace msleep() of 20ms or less with usleep_range()
mmc: dw_mmc: Convert timers to use timer_setup()
mmc: dw_mmc: Cleanup the DTO timer like the CTO one
mmc: vub300: Use common code in __download_offload_pseudocode()
mmc: tmio: Use common error handling code in tmio_mmc_host_probe()
mmc: Convert timers to use timer_setup()
mmc: sdhci-acpi: Fix voltage switch for some Intel host controllers
mmc: sdhci-acpi: Let devices define their own private data
mmc: mediatek: perfer to use rise edge latching for cmd line
mmc: mediatek: improve eMMC hs400 mode read performance
mmc: mediatek: add latch-ck support
mmc: mediatek: add support of source_cg clock
mmc: mediatek: add stop_clk fix and enhance_rx support
mmc: mediatek: add busy_check support
mmc: mediatek: add async fifo and data tune support
mmc: mediatek: add pad_tune0 support
mmc: mediatek: make hs400_tune_response only for mt8173
arm64: dts: mt8173: remove "mediatek, mt8135-mmc" from mmc nodes
...
Diffstat (limited to 'drivers/mmc/core/core.c')
-rw-r--r--  drivers/mmc/core/core.c | 262
1 file changed, 235 insertions(+), 27 deletions(-)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	host->ops->request(host, mrq);
 }
 
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+			     bool cqe)
 {
 	if (mrq->sbc) {
 		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
 	}
 
 	if (mrq->cmd) {
-		pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
-			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
-			 mrq->cmd->flags);
+		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+			 mmc_hostname(host), cqe ? "CQE direct " : "",
+			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+	} else if (cqe) {
+		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
 	}
 
 	if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
 	return 0;
 }
 
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 {
 	int err;
 
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	if (mmc_card_removed(host->card))
 		return -ENOMEDIUM;
 
-	mmc_mrq_pr_debug(host, mrq);
+	mmc_mrq_pr_debug(host, mrq, false);
 
 	WARN_ON(!host->claimed);
 
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 
 	return 0;
 }
+EXPORT_SYMBOL(mmc_start_request);
 
 /*
  * mmc_wait_data_done() - done callback for data request
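With mmc_start_request() now exported, callers outside core.c can start a prepared request asynchronously and take completion through mrq->done() instead of blocking. A minimal, hypothetical sketch of such a caller (my_async_start and my_done are illustrative names, not part of this commit; the host must already be claimed, as the WARN_ON above enforces):

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static void my_done(struct mmc_request *mrq)
{
	/* Runs from the host driver's completion path */
	pr_debug("request done, cmd error %d\n",
		 mrq->cmd ? mrq->cmd->error : 0);
}

static int my_async_start(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = my_done;	/* completion callback invoked by the host */
	/* Returns -ENOMEDIUM if the card was removed, 0 once started */
	return mmc_start_request(host, mrq);
}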
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 }
 EXPORT_SYMBOL(mmc_wait_for_req_done);
 
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning if needed and it is possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+	int err;
+
+	/*
+	 * CQE cannot process re-tuning commands. Caller must hold retuning
+	 * while CQE is in use. Re-tuning can happen here only when CQE has no
+	 * active requests i.e. this is the first. Note, re-tuning will call
+	 * ->cqe_off().
+	 */
+	err = mmc_retune(host);
+	if (err)
+		goto out_err;
+
+	mrq->host = host;
+
+	mmc_mrq_pr_debug(host, mrq, true);
+
+	err = mmc_mrq_prep(host, mrq);
+	if (err)
+		goto out_err;
+
+	err = host->cqe_ops->cqe_request(host, mrq);
+	if (err)
+		goto out_err;
+
+	trace_mmc_request_start(host, mrq);
+
+	return 0;
+
+out_err:
+	if (mrq->cmd) {
+		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+			 mmc_hostname(host), mrq->cmd->opcode, err);
+	} else {
+		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+			 mmc_hostname(host), mrq->tag, err);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+	mmc_should_fail_request(host, mrq);
+
+	/* Flag re-tuning needed on CRC errors */
+	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+	    (mrq->data && mrq->data->error == -EILSEQ))
+		mmc_retune_needed(host);
+
+	trace_mmc_request_done(host, mrq);
+
+	if (mrq->cmd) {
+		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+	} else {
+		pr_debug("%s: CQE transfer done tag %d\n",
+			 mmc_hostname(host), mrq->tag);
+	}
+
+	if (mrq->data) {
+		pr_debug("%s: %d bytes transferred: %d\n",
+			 mmc_hostname(host),
+			 mrq->data->bytes_xfered, mrq->data->error);
+	}
+
+	mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+	if (host->cqe_ops->cqe_post_req)
+		host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT	1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+	struct mmc_command cmd;
+	int err;
+
+	mmc_retune_hold_now(host);
+
+	/*
+	 * Recovery is expected seldom, if at all, but it reduces performance,
+	 * so make sure it is not completely silent.
+	 */
+	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+	host->cqe_ops->cqe_recovery_start(host);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.opcode       = MMC_STOP_TRANSMISSION,
+	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC,
+	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+	mmc_wait_for_cmd(host, &cmd, 0);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
+	cmd.arg          = 1; /* Discard entire queue */
+	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
+	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+	err = mmc_wait_for_cmd(host, &cmd, 0);
+
+	host->cqe_ops->cqe_recovery_finish(host);
+
+	mmc_retune_release(host);
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
 /**
  * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
  * @host: MMC host
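Taken together, these exports define the contract between the core and a CQE host driver: the core submits through ->cqe_request() from mmc_cqe_start_req(), and the driver hands each finished request back with mmc_cqe_request_done(), which flags re-tuning on -EILSEQ and then invokes mrq->done(). A hypothetical completion-path sketch (the my_cqe_* names, private struct, and IRQ wiring are illustrative assumptions, not from this commit):

#include <linux/interrupt.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

struct my_cqe_host {
	struct mmc_host *mmc;
	/* ... controller state ... */
};

/* Assumed helper: pops one completed request off the controller, or NULL */
struct mmc_request *my_cqe_pop_completed(struct my_cqe_host *cq_host);

static irqreturn_t my_cqe_irq(int irq, void *dev_id)
{
	struct my_cqe_host *cq_host = dev_id;
	struct mmc_request *mrq;

	while ((mrq = my_cqe_pop_completed(cq_host))) {
		/*
		 * Any error is flagged on the request before handing it back;
		 * the core turns -EILSEQ into a re-tuning request and then
		 * calls mrq->done() for us.
		 */
		mmc_cqe_request_done(cq_host->mmc, mrq);
	}
	return IRQ_HANDLED;
}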
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 }
 EXPORT_SYMBOL(mmc_align_data_size);
 
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+				   struct task_struct *task)
+{
+	return host->claimer == ctx ||
+	       (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+				       struct mmc_ctx *ctx,
+				       struct task_struct *task)
+{
+	if (!host->claimer) {
+		if (ctx)
+			host->claimer = ctx;
+		else
+			host->claimer = &host->default_ctx;
+	}
+	if (task)
+		host->claimer->task = task;
+}
+
 /**
  * __mmc_claim_host - exclusively claim a host
  * @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
  * @abort: whether or not the operation should be aborted
  *
  * Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
  * that non-zero value without acquiring the lock. Returns zero
  * with the lock held otherwise.
  */
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+		     atomic_t *abort)
 {
+	struct task_struct *task = ctx ? NULL : current;
 	DECLARE_WAITQUEUE(wait, current);
 	unsigned long flags;
 	int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		stop = abort ? atomic_read(abort) : 0;
-		if (stop || !host->claimed || host->claimer == current)
+		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
 			break;
 		spin_unlock_irqrestore(&host->lock, flags);
 		schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
 	set_current_state(TASK_RUNNING);
 	if (!stop) {
 		host->claimed = 1;
-		host->claimer = current;
+		mmc_ctx_set_claimer(host, ctx, task);
 		host->claim_cnt += 1;
 		if (host->claim_cnt == 1)
 			pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
 		spin_unlock_irqrestore(&host->lock, flags);
 	} else {
 		host->claimed = 0;
+		host->claimer->task = NULL;
 		host->claimer = NULL;
 		spin_unlock_irqrestore(&host->lock, flags);
 		wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
  * This is a helper function, which fetches a runtime pm reference for the
  * card device and also claims the host.
  */
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
 {
 	pm_runtime_get_sync(&card->dev);
-	mmc_claim_host(card->host);
+	__mmc_claim_host(card->host, ctx, NULL);
 }
 EXPORT_SYMBOL(mmc_get_card);
 
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
  * This is a helper function, which releases the host and drops the runtime
  * pm reference for the card device.
  */
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
 {
-	mmc_release_host(card->host);
+	struct mmc_host *host = card->host;
+
+	WARN_ON(ctx && host->claimer != ctx);
+
+	mmc_release_host(host);
 	pm_runtime_mark_last_busy(&card->dev);
 	pm_runtime_put_autosuspend(&card->dev);
 }
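The context parameter is what lets a blkmq queue claim the host independently of whichever worker task happens to be running: claims made with the same struct mmc_ctx nest, while a NULL context falls back to the old per-task behaviour via host->default_ctx. A hypothetical issue path using the new helpers (my_queue_issue and the caller-owned ctx are illustrative assumptions):

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static void my_queue_issue(struct mmc_card *card, struct mmc_ctx *ctx,
			   struct mmc_request *mrq)
{
	/* Runtime-resume the card and claim the host for this context */
	mmc_get_card(card, ctx);

	mmc_wait_for_req(card->host, mrq);

	/* Drop the claim (WARNs if another context holds it) and the PM ref */
	mmc_put_card(card, ctx);
}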
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
 
 #endif /* CONFIG_REGULATOR */
 
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. errno should be handled, it is either a critical error
+ * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
+ * regulators have been found because they all are optional. If you require
+ * certain regulators, you need to check separately in your driver if they got
+ * populated after calling this function.
+ */
 int mmc_regulator_get_supply(struct mmc_host *mmc)
 {
 	struct device *dev = mmc_dev(mmc);
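Per the newly documented contract, a host driver should propagate any non-zero return (it is either critical or -EPROBE_DEFER) and then check each optional supply itself if it requires one. A hypothetical probe fragment (my_probe_regulators is an illustrative name):

#include <linux/err.h>
#include <linux/mmc/host.h>

static int my_probe_regulators(struct mmc_host *mmc)
{
	int ret = mmc_regulator_get_supply(mmc);

	if (ret)
		return ret;	/* critical error or -EPROBE_DEFER */

	/*
	 * 0 does not mean the supplies were found; they are all optional.
	 * This hypothetical driver cannot work without vmmc, so check it.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		return -ENODEV;

	return 0;
}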
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
 
 }
 
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+	u32 clock;
+
+	/*
+	 * During a signal voltage level switch, the clock must be gated
+	 * for 5 ms according to the SD spec
+	 */
+	clock = host->ios.clock;
+	host->ios.clock = 0;
+	mmc_set_ios(host);
+
+	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+		return -EAGAIN;
+
+	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+	mmc_delay(10);
+	host->ios.clock = clock;
+	mmc_set_ios(host);
+
+	return 0;
+}
+
 int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
 {
 	struct mmc_command cmd = {};
 	int err = 0;
-	u32 clock;
 
 	/*
 	 * If we cannot switch voltages, return failure so the caller
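Factoring the gate-switch-ungate sequence into mmc_host_set_uhs_voltage() lets other core paths, such as the SD signal-voltage retry without power cycle mentioned in the pull message, reuse the spec-mandated timing instead of open-coding it. A hypothetical caller (illustrative only, assuming core-internal scope):

#include <linux/mmc/host.h>

static int my_retry_uhs_switch(struct mmc_host *host)
{
	/*
	 * Gates the clock, switches signalling to 1.8V, keeps the clock
	 * gated for at least 10 ms, then restores it; -EAGAIN means the
	 * switch failed and a power cycle is required.
	 */
	int err = mmc_host_set_uhs_voltage(host);

	if (err)
		pr_debug("%s: UHS voltage switch failed\n", mmc_hostname(host));
	return err;
}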
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
 		err = -EAGAIN;
 		goto power_cycle;
 	}
-	/*
-	 * During a signal voltage level switch, the clock must be gated
-	 * for 5 ms according to the SD spec
-	 */
-	clock = host->ios.clock;
-	host->ios.clock = 0;
-	mmc_set_ios(host);
 
-	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+	if (mmc_host_set_uhs_voltage(host)) {
 		/*
 		 * Voltages may not have been switched, but we've already
 		 * sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
 		goto power_cycle;
 	}
 
-	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
-	mmc_delay(10);
-	host->ios.clock = clock;
-	mmc_set_ios(host);
-
 	/* Wait for at least 1 ms according to spec */
 	mmc_delay(1);
 