Diffstat (limited to 'drivers')
40 files changed, 3748 insertions, 1927 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e42224559..1ff5486213fb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_RETRY,
+	MMC_BLK_RETRY_SINGLE,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_ABORT,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -427,14 +437,6 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-	struct mmc_request	mrq;
-	struct mmc_command	sbc;
-	struct mmc_command	cmd;
-	struct mmc_command	stop;
-	struct mmc_data		data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
@@ -525,7 +527,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	return result;
 }
 
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+	struct mmc_command cmd = {0};
+	int err;
+
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 5);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 {
 	struct mmc_command cmd = {0};
 	int err;
@@ -534,11 +549,141 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	err = mmc_wait_for_cmd(card->host, &cmd, retries);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+#define ERR_RETRY	2
+#define ERR_ABORT	1
+#define ERR_CONTINUE	0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+	bool status_valid, u32 status)
+{
+	switch (error) {
+	case -EILSEQ:
+		/* response crc error, retry the r/w cmd */
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "response CRC error",
+			name, status);
+		return ERR_RETRY;
+
+	case -ETIMEDOUT:
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "timed out", name, status);
+
+		/* If the status cmd initially failed, retry the r/w cmd */
+		if (!status_valid)
+			return ERR_RETRY;
+
+		/*
+		 * If it was a r/w cmd crc error, or illegal command
+		 * (eg, issued in wrong state) then retry - we should
+		 * have corrected the state problem above.
+		 */
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+			return ERR_RETRY;
+
+		/* Otherwise abort the command */
+		return ERR_ABORT;
+
+	default:
+		/* We don't understand the error code the driver gave us */
+		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+		       req->rq_disk->disk_name, error, status);
+		return ERR_ABORT;
+	}
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state.  Essentially, we do this as follows:
+ * - Obtain card status.  If the first attempt to obtain card status fails,
+ *   the status word will reflect the failed status cmd, not the failed
+ *   r/w cmd.  If we fail to obtain card status, it suggests we can no
+ *   longer communicate with the card.
+ * - Check the card state.  If the card received the cmd but there was a
+ *   transient problem with the response, it might still be in a data transfer
+ *   mode.  Try to send it a stop command.  If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ *   transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ *   illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+	struct mmc_blk_request *brq)
+{
+	bool prev_cmd_status_valid = true;
+	u32 status, stop_status = 0;
+	int err, retry;
+
+	/*
+	 * Try to get card status which indicates both the card state
+	 * and why there was no response.  If the first attempt fails,
+	 * we can't be sure the returned status is for the r/w command.
+	 */
+	for (retry = 2; retry >= 0; retry--) {
+		err = get_card_status(card, &status, 0);
+		if (!err)
+			break;
+
+		prev_cmd_status_valid = false;
+		pr_err("%s: error %d sending status command, %sing\n",
+		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+	}
+
+	/* We couldn't get a response from the card.  Give up. */
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status command",
-		       req->rq_disk->disk_name, err);
-	return cmd.resp[0];
+		return ERR_ABORT;
+
+	/*
+	 * Check the current card state.  If it is in some data transfer
+	 * mode, tell it to stop (and hopefully transition back to TRAN.)
+	 */
+	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+		err = send_stop(card, &stop_status);
+		if (err)
+			pr_err("%s: error %d sending stop command\n",
+			       req->rq_disk->disk_name, err);
+
+		/*
+		 * If the stop cmd also timed out, the card is probably
+		 * not present, so abort.  Other errors are bad news too.
+		 */
+		if (err)
+			return ERR_ABORT;
+	}
+
+	/* Check for set block count errors */
+	if (brq->sbc.error)
+		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+				prev_cmd_status_valid, status);
+
+	/* Check for r/w command errors */
+	if (brq->cmd.error)
+		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+				prev_cmd_status_valid, status);
+
+	/* Now for stop errors.  These aren't fatal to the transfer. */
+	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+	       req->rq_disk->disk_name, brq->stop.error,
+	       brq->cmd.resp[0], status);
+
+	/*
+	 * Substitute in our own stop status as this will give the error
+	 * state which happened during the execution of the r/w command.
+	 */
+	if (stop_status) {
+		brq->stop.resp[0] = stop_status;
+		brq->stop.error = 0;
+	}
+	return ERR_CONTINUE;
 }
 
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
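For orientation, the card states tested above come from the CMD13 (SEND_STATUS) R1 response; the decoding macros live in include/linux/mmc/mmc.h, which is not part of this excerpt. A rough reminder of the values the recovery path relies on (assumed from that header, shown only as a sketch):

/* Sketch only: R1 status decoding assumed from include/linux/mmc/mmc.h. */
#define R1_CURRENT_STATE(x)	((x & 0x00001E00) >> 9)	/* bits 12:9 */
#define R1_STATE_TRAN		4	/* ready for new data */
#define R1_STATE_DATA		5	/* sending data to the host */
#define R1_STATE_RCV		6	/* receiving data from the host */
#define R1_STATE_PRG		7	/* programming, busy */

/*
 * So mmc_blk_cmd_recovery() only sends CMD12 when the card is stuck in
 * DATA or RCV, and the write-completion path below polls until the card
 * leaves PRG and reports R1_READY_FOR_DATA.
 */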
@@ -669,240 +814,324 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	}
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS							\
+	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
+	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
+	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
+	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
+	 R1_CC_ERROR |		/* Card controller error */		\
+	 R1_ERROR)		/* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+			     struct mmc_async_req *areq)
 {
-	struct mmc_blk_data *md = mq->data;
-	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request brq;
-	int ret = 1, disable_multi = 0;
+	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+						    mmc_active);
+	struct mmc_blk_request *brq = &mq_mrq->brq;
+	struct request *req = mq_mrq->req;
 
 	/*
-	 * Reliable writes are used to implement Forced Unit Access and
-	 * REQ_META accesses, and are supported only on MMCs.
+	 * sbc.error indicates a problem with the set block count
+	 * command.  No data will have been transferred.
+	 *
+	 * cmd.error indicates a problem with the r/w command.  No
+	 * data will have been transferred.
+	 *
+	 * stop.error indicates a problem with the stop command.  Data
+	 * may have been transferred, or may still be transferring.
 	 */
-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-			  (req->cmd_flags & REQ_META)) &&
-			 (rq_data_dir(req) == WRITE) &&
-			 (md->flags & MMC_BLK_REL_WR);
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+		case ERR_RETRY:
+			return MMC_BLK_RETRY;
+		case ERR_ABORT:
+			return MMC_BLK_ABORT;
+		case ERR_CONTINUE:
+			break;
+		}
+	}
 
-	do {
-		struct mmc_command cmd = {0};
-		u32 readcmd, writecmd, status = 0;
-
-		memset(&brq, 0, sizeof(struct mmc_blk_request));
-		brq.mrq.cmd = &brq.cmd;
-		brq.mrq.data = &brq.data;
-
-		brq.cmd.arg = blk_rq_pos(req);
-		if (!mmc_card_blockaddr(card))
-			brq.cmd.arg <<= 9;
-		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 512;
-		brq.stop.opcode = MMC_STOP_TRANSMISSION;
-		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = blk_rq_sectors(req);
+	/*
+	 * Check for errors relating to the execution of the
+	 * initial command - such as address errors.  No data
+	 * has been transferred.
+	 */
+	if (brq->cmd.resp[0] & CMD_ERRORS) {
+		pr_err("%s: r/w command failed, status = %#x\n",
+		       req->rq_disk->disk_name, brq->cmd.resp[0]);
+		return MMC_BLK_ABORT;
+	}
 
-		/*
-		 * The block layer doesn't support all sector count
-		 * restrictions, so we need to be prepared for too big
-		 * requests.
-		 */
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
+	/*
+	 * Everything else is either success, or a data error of some
+	 * kind.  If it was a write, we may have transitioned to
+	 * program mode, which we have to wait for it to complete.
+	 */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		u32 status;
+		do {
+			int err = get_card_status(card, &status, 5);
+			if (err) {
+				printk(KERN_ERR "%s: error %d requesting status\n",
+				       req->rq_disk->disk_name, err);
+				return MMC_BLK_CMD_ERR;
+			}
+			/*
+			 * Some cards mishandle the status bits,
+			 * so make sure to check both the busy
+			 * indication and the card state.
+			 */
+		} while (!(status & R1_READY_FOR_DATA) ||
+			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+	}
 
-		/*
-		 * After a read error, we redo the request one sector at a time
-		 * in order to accurately determine which sectors can be read
-		 * successfully.
-		 */
-		if (disable_multi && brq.data.blocks > 1)
-			brq.data.blocks = 1;
+	if (brq->data.error) {
+		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+		       req->rq_disk->disk_name, brq->data.error,
+		       (unsigned)blk_rq_pos(req),
+		       (unsigned)blk_rq_sectors(req),
+		       brq->cmd.resp[0], brq->stop.resp[0]);
 
-		if (brq.data.blocks > 1 || do_rel_wr) {
-			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
-			 */
-			if (!mmc_host_is_spi(card->host) ||
-			    rq_data_dir(req) == READ)
-				brq.mrq.stop = &brq.stop;
-			readcmd = MMC_READ_MULTIPLE_BLOCK;
-			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-		} else {
-			brq.mrq.stop = NULL;
-			readcmd = MMC_READ_SINGLE_BLOCK;
-			writecmd = MMC_WRITE_BLOCK;
-		}
 		if (rq_data_dir(req) == READ) {
-			brq.cmd.opcode = readcmd;
-			brq.data.flags |= MMC_DATA_READ;
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				return MMC_BLK_RETRY_SINGLE;
+			}
+			return MMC_BLK_DATA_ERR;
 		} else {
-			brq.cmd.opcode = writecmd;
-			brq.data.flags |= MMC_DATA_WRITE;
+			return MMC_BLK_CMD_ERR;
 		}
+	}
 
-		if (do_rel_wr)
-			mmc_apply_rel_rw(&brq, card, req);
+	if (ret == MMC_BLK_SUCCESS &&
+	    blk_rq_bytes(req) != brq->data.bytes_xfered)
+		ret = MMC_BLK_PARTIAL;
 
-		/*
-		 * Pre-defined multi-block transfers are preferable to
-		 * open ended-ones (and necessary for reliable writes).
-		 * However, it is not sufficient to just send CMD23,
-		 * and avoid the final CMD12, as on an error condition
-		 * CMD12 (stop) needs to be sent anyway. This, coupled
-		 * with Auto-CMD23 enhancements provided by some
-		 * hosts, means that the complexity of dealing
-		 * with this is best left to the host. If CMD23 is
-		 * supported by card and host, we'll fill sbc in and let
-		 * the host deal with handling it correctly. This means
-		 * that for hosts that don't expose MMC_CAP_CMD23, no
-		 * change of behavior will be observed.
-		 *
-		 * N.B: Some MMC cards experience perf degradation.
-		 * We'll avoid using CMD23-bounded multiblock writes for
-		 * these, while retaining features like reliable writes.
-		 */
+	return ret;
+}
 
-		if ((md->flags & MMC_BLK_CMD23) &&
-		    mmc_op_multi(brq.cmd.opcode) &&
-		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
-			brq.sbc.arg = brq.data.blocks |
-				(do_rel_wr ? (1 << 31) : 0);
-			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-			brq.mrq.sbc = &brq.sbc;
-		}
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->data;
 
-		mmc_set_data_timeout(&brq.data, card);
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			 (rq_data_dir(req) == WRITE) &&
+			 (md->flags & MMC_BLK_REL_WR);
 
-		brq.data.sg = mq->sg;
-		brq.data.sg_len = mmc_queue_map_sg(mq);
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
 
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq->data.blksz = 512;
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	brq->data.blocks = blk_rq_sectors(req);
+
+	/*
+	 * The block layer doesn't support all sector count
+	 * restrictions, so we need to be prepared for too big
+	 * requests.
+	 */
+	if (brq->data.blocks > card->host->max_blk_count)
+		brq->data.blocks = card->host->max_blk_count;
+
+	/*
+	 * After a read error, we redo the request one sector at a time
+	 * in order to accurately determine which sectors can be read
+	 * successfully.
+	 */
+	if (disable_multi && brq->data.blocks > 1)
+		brq->data.blocks = 1;
+
+	if (brq->data.blocks > 1 || do_rel_wr) {
+		/* SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
 		 */
-		if (brq.data.blocks != blk_rq_sectors(req)) {
-			int i, data_size = brq.data.blocks << 9;
-			struct scatterlist *sg;
-
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
+		if (!mmc_host_is_spi(card->host) ||
+		    rq_data_dir(req) == READ)
+			brq->mrq.stop = &brq->stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq->mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
+	if (rq_data_dir(req) == READ) {
+		brq->cmd.opcode = readcmd;
+		brq->data.flags |= MMC_DATA_READ;
+	} else {
+		brq->cmd.opcode = writecmd;
+		brq->data.flags |= MMC_DATA_WRITE;
+	}
 
-		mmc_queue_bounce_pre(mq);
+	if (do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
 
-		mmc_wait_for_req(card->host, &brq.mrq);
+	/*
+	 * Pre-defined multi-block transfers are preferable to
+	 * open ended-ones (and necessary for reliable writes).
+	 * However, it is not sufficient to just send CMD23,
+	 * and avoid the final CMD12, as on an error condition
+	 * CMD12 (stop) needs to be sent anyway. This, coupled
+	 * with Auto-CMD23 enhancements provided by some
+	 * hosts, means that the complexity of dealing
+	 * with this is best left to the host. If CMD23 is
+	 * supported by card and host, we'll fill sbc in and let
+	 * the host deal with handling it correctly. This means
+	 * that for hosts that don't expose MMC_CAP_CMD23, no
+	 * change of behavior will be observed.
+	 *
+	 * N.B: Some MMC cards experience perf degradation.
+	 * We'll avoid using CMD23-bounded multiblock writes for
+	 * these, while retaining features like reliable writes.
+	 */
 
-		mmc_queue_bounce_post(mq);
+	if ((md->flags & MMC_BLK_CMD23) &&
+	    mmc_op_multi(brq->cmd.opcode) &&
+	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+		brq->sbc.arg = brq->data.blocks |
+			(do_rel_wr ? (1 << 31) : 0);
+		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		brq->mrq.sbc = &brq->sbc;
+	}
 
-		/*
-		 * Check for errors here, but don't jump to cmd_err
-		 * until later as we need to wait for the card to leave
-		 * programming mode even when things go wrong.
-		 */
-		if (brq.sbc.error || brq.cmd.error ||
-		    brq.data.error || brq.stop.error) {
-			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
-				/* Redo read one sector at a time */
-				printk(KERN_WARNING "%s: retrying using single "
-				       "block read\n", req->rq_disk->disk_name);
-				disable_multi = 1;
-				continue;
-			}
-			status = get_card_status(card, req);
-		}
+	mmc_set_data_timeout(&brq->data, card);
 
-		if (brq.sbc.error) {
-			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
-			       "command, response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.sbc.error,
-			       brq.sbc.resp[0], status);
-		}
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
-		if (brq.cmd.error) {
-			printk(KERN_ERR "%s: error %d sending read/write "
-			       "command, response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.cmd.error,
-			       brq.cmd.resp[0], status);
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
 		}
+		brq->data.sg_len = i;
+	}
 
-		if (brq.data.error) {
-			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
-				/* 'Stop' response contains card status */
-				status = brq.mrq.stop->resp[0];
-			printk(KERN_ERR "%s: error %d transferring data,"
-			       " sector %u, nr %u, card status %#x\n",
-			       req->rq_disk->disk_name, brq.data.error,
-			       (unsigned)blk_rq_pos(req),
-			       (unsigned)blk_rq_sectors(req), status);
-		}
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_err_check;
 
-		if (brq.stop.error) {
-			printk(KERN_ERR "%s: error %d sending stop command, "
-			       "response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.stop.error,
-			       brq.stop.resp[0], status);
-		}
+	mmc_queue_bounce_pre(mqrq);
+}
 
-		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-			do {
-				int err;
-
-				cmd.opcode = MMC_SEND_STATUS;
-				cmd.arg = card->rca << 16;
-				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-				err = mmc_wait_for_cmd(card->host, &cmd, 5);
-				if (err) {
-					printk(KERN_ERR "%s: error %d requesting status\n",
-					       req->rq_disk->disk_name, err);
-					goto cmd_err;
-				}
-				/*
-				 * Some cards mishandle the status bits,
-				 * so make sure to check both the busy
-				 * indication and the card state.
-				 */
-			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
-				 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
-			if (cmd.resp[0] & ~0x00000900)
-				printk(KERN_ERR "%s: status = %08x\n",
-				       req->rq_disk->disk_name, cmd.resp[0]);
-			if (mmc_decode_status(cmd.resp))
-				goto cmd_err;
-#endif
-		}
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+	int ret = 1, disable_multi = 0, retry = 0;
+	enum mmc_blk_status status;
+	struct mmc_queue_req *mq_rq;
+	struct request *req;
+	struct mmc_async_req *areq;
+
+	if (!rqc && !mq->mqrq_prev->req)
+		return 0;
 
-		if (brq.cmd.error || brq.stop.error || brq.data.error) {
-			if (rq_data_dir(req) == READ) {
+	do {
+		if (rqc) {
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			areq = &mq->mqrq_cur->mmc_active;
+		} else
+			areq = NULL;
+		areq = mmc_start_req(card->host, areq, (int *) &status);
+		if (!areq)
+			return 0;
+
+		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+		brq = &mq_rq->brq;
+		req = mq_rq->req;
+		mmc_queue_bounce_post(mq_rq);
+
+		switch (status) {
+		case MMC_BLK_SUCCESS:
+		case MMC_BLK_PARTIAL:
+			/*
+			 * A block was successfully transferred.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0,
+						brq->data.bytes_xfered);
+			spin_unlock_irq(&md->lock);
+			if (status == MMC_BLK_SUCCESS && ret) {
 				/*
-				 * After an error, we redo I/O one sector at a
-				 * time, so we only reach here after trying to
-				 * read a single sector.
+				 * The blk_end_request has returned non zero
+				 * even though all data is transferred and no
+				 * errors returned by host.
+				 * If this happens it's a bug.
 				 */
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, -EIO, brq.data.blksz);
-				spin_unlock_irq(&md->lock);
-				continue;
+				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+				       __func__, blk_rq_bytes(req),
+				       brq->data.bytes_xfered);
+				rqc = NULL;
+				goto cmd_abort;
 			}
+			break;
+		case MMC_BLK_CMD_ERR:
 			goto cmd_err;
+		case MMC_BLK_RETRY_SINGLE:
+			disable_multi = 1;
+			break;
+		case MMC_BLK_RETRY:
+			if (retry++ < 5)
+				break;
+		case MMC_BLK_ABORT:
+			goto cmd_abort;
+		case MMC_BLK_DATA_ERR:
+			/*
+			 * After an error, we redo I/O one sector at a
+			 * time, so we only reach here after trying to
+			 * read a single sector.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, -EIO,
+						brq->data.blksz);
+			spin_unlock_irq(&md->lock);
+			if (!ret)
+				goto start_new_req;
+			break;
 		}
 
-		/*
-		 * A block was successfully transferred.
-		 */
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
+		if (ret) {
+			/*
+			 * In case of an incomplete request
+			 * prepare it again and resend.
+			 */
+			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+		}
 	} while (ret);
 
 	return 1;
@@ -927,15 +1156,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		}
 	} else {
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
+ cmd_abort:
 	spin_lock_irq(&md->lock);
 	while (ret)
 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
+ start_new_req:
+	if (rqc) {
+		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+	}
+
 	return 0;
 }
 
@@ -945,26 +1181,37 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 
-	mmc_claim_host(card->host);
+	if (req && !mq->mqrq_prev->req)
+		/* claim host only for the first request */
+		mmc_claim_host(card->host);
+
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
 		ret = 0;
 		goto out;
 	}
 
-	if (req->cmd_flags & REQ_DISCARD) {
+	if (req && req->cmd_flags & REQ_DISCARD) {
+		/* complete ongoing async transfer before issuing discard */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
 		if (req->cmd_flags & REQ_SECURE)
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (req->cmd_flags & REQ_FLUSH) {
+	} else if (req && req->cmd_flags & REQ_FLUSH) {
+		/* complete ongoing async transfer before issuing flush */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	mmc_release_host(card->host);
+	if (!req)
+		/* release host only when there are no more requests */
+		mmc_release_host(card->host);
 	return ret;
 }
 
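The rewritten mmc_blk_issue_rw_rq() above leans on the new asynchronous request API in the MMC core, which is not part of this drivers/mmc/card excerpt (it lives in drivers/mmc/core/core.c and include/linux/mmc/host.h). A minimal sketch of the contract the block driver assumes follows; the names match the series, but the body is illustrative only:

/*
 * Sketch of the caller-side contract of mmc_start_req(), as used above.
 * Submitting the next request returns the *previous* one once it has
 * completed and its err_check() callback (mmc_blk_err_check here) has
 * classified the result.  Passing NULL flushes the pipeline.
 */
struct mmc_async_req {
	struct mmc_request	*mrq;
	/* Check error status of completed request */
	int (*err_check)(struct mmc_card *, struct mmc_async_req *);
};

static void example_pipeline(struct mmc_host *host,
			     struct mmc_async_req *next_areq)
{
	struct mmc_async_req *done;
	int status;

	/* Start 'next_areq' (may be NULL) and reap the one in flight. */
	done = mmc_start_req(host, next_areq, &status);
	if (!done)
		return;	/* nothing was in flight yet */

	/*
	 * 'status' is whatever done->err_check() returned, e.g. the
	 * enum mmc_blk_status values added at the top of block.c.  'done'
	 * can now be completed back to the block layer while the host
	 * controller is already busy transferring 'next_areq'.
	 */
}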
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfae92f4..006a5e9f8ab8 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -148,6 +148,27 @@ struct mmc_test_card {
 	struct mmc_test_general_result	*gr;
 };
 
+enum mmc_test_prep_media {
+	MMC_TEST_PREP_NONE = 0,
+	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+	MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+	unsigned int *sg_len;
+	unsigned int *bs;
+	unsigned int len;
+	unsigned int size;
+	bool do_write;
+	bool do_nonblock_req;
+	enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+	struct mmc_async_req areq;
+	struct mmc_test_card *test;
+};
+
 /*******************************************************************/
 /*  General helper functions                                       */
 /*******************************************************************/
153 | /*******************************************************************/ | 174 | /*******************************************************************/ |
@@ -367,21 +388,26 @@ out_free: | |||
367 | * Map memory into a scatterlist. Optionally allow the same memory to be | 388 | * Map memory into a scatterlist. Optionally allow the same memory to be |
368 | * mapped more than once. | 389 | * mapped more than once. |
369 | */ | 390 | */ |
370 | static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, | 391 | static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, |
371 | struct scatterlist *sglist, int repeat, | 392 | struct scatterlist *sglist, int repeat, |
372 | unsigned int max_segs, unsigned int max_seg_sz, | 393 | unsigned int max_segs, unsigned int max_seg_sz, |
373 | unsigned int *sg_len) | 394 | unsigned int *sg_len, int min_sg_len) |
374 | { | 395 | { |
375 | struct scatterlist *sg = NULL; | 396 | struct scatterlist *sg = NULL; |
376 | unsigned int i; | 397 | unsigned int i; |
398 | unsigned long sz = size; | ||
377 | 399 | ||
378 | sg_init_table(sglist, max_segs); | 400 | sg_init_table(sglist, max_segs); |
401 | if (min_sg_len > max_segs) | ||
402 | min_sg_len = max_segs; | ||
379 | 403 | ||
380 | *sg_len = 0; | 404 | *sg_len = 0; |
381 | do { | 405 | do { |
382 | for (i = 0; i < mem->cnt; i++) { | 406 | for (i = 0; i < mem->cnt; i++) { |
383 | unsigned long len = PAGE_SIZE << mem->arr[i].order; | 407 | unsigned long len = PAGE_SIZE << mem->arr[i].order; |
384 | 408 | ||
409 | if (min_sg_len && (size / min_sg_len < len)) | ||
410 | len = ALIGN(size / min_sg_len, 512); | ||
385 | if (len > sz) | 411 | if (len > sz) |
386 | len = sz; | 412 | len = sz; |
387 | if (len > max_seg_sz) | 413 | if (len > max_seg_sz) |
@@ -554,11 +580,12 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, | |||
554 | 580 | ||
555 | printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " | 581 | printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " |
556 | "%lu.%09lu seconds (%u kB/s, %u KiB/s, " | 582 | "%lu.%09lu seconds (%u kB/s, %u KiB/s, " |
557 | "%u.%02u IOPS)\n", | 583 | "%u.%02u IOPS, sg_len %d)\n", |
558 | mmc_hostname(test->card->host), count, sectors, count, | 584 | mmc_hostname(test->card->host), count, sectors, count, |
559 | sectors >> 1, (sectors & 1 ? ".5" : ""), | 585 | sectors >> 1, (sectors & 1 ? ".5" : ""), |
560 | (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, | 586 | (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, |
561 | rate / 1000, rate / 1024, iops / 100, iops % 100); | 587 | rate / 1000, rate / 1024, iops / 100, iops % 100, |
588 | test->area.sg_len); | ||
562 | 589 | ||
563 | mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); | 590 | mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); |
564 | } | 591 | } |
@@ -661,7 +688,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, | |||
661 | * Checks that a normal transfer didn't have any errors | 688 | * Checks that a normal transfer didn't have any errors |
662 | */ | 689 | */ |
663 | static int mmc_test_check_result(struct mmc_test_card *test, | 690 | static int mmc_test_check_result(struct mmc_test_card *test, |
664 | struct mmc_request *mrq) | 691 | struct mmc_request *mrq) |
665 | { | 692 | { |
666 | int ret; | 693 | int ret; |
667 | 694 | ||
@@ -685,6 +712,17 @@ static int mmc_test_check_result(struct mmc_test_card *test, | |||
685 | return ret; | 712 | return ret; |
686 | } | 713 | } |
687 | 714 | ||
715 | static int mmc_test_check_result_async(struct mmc_card *card, | ||
716 | struct mmc_async_req *areq) | ||
717 | { | ||
718 | struct mmc_test_async_req *test_async = | ||
719 | container_of(areq, struct mmc_test_async_req, areq); | ||
720 | |||
721 | mmc_test_wait_busy(test_async->test); | ||
722 | |||
723 | return mmc_test_check_result(test_async->test, areq->mrq); | ||
724 | } | ||
725 | |||
688 | /* | 726 | /* |
689 | * Checks that a "short transfer" behaved as expected | 727 | * Checks that a "short transfer" behaved as expected |
690 | */ | 728 | */ |
@@ -720,6 +758,85 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test, | |||
720 | } | 758 | } |
721 | 759 | ||
722 | /* | 760 | /* |
761 | * Tests nonblock transfer with certain parameters | ||
762 | */ | ||
763 | static void mmc_test_nonblock_reset(struct mmc_request *mrq, | ||
764 | struct mmc_command *cmd, | ||
765 | struct mmc_command *stop, | ||
766 | struct mmc_data *data) | ||
767 | { | ||
768 | memset(mrq, 0, sizeof(struct mmc_request)); | ||
769 | memset(cmd, 0, sizeof(struct mmc_command)); | ||
770 | memset(data, 0, sizeof(struct mmc_data)); | ||
771 | memset(stop, 0, sizeof(struct mmc_command)); | ||
772 | |||
773 | mrq->cmd = cmd; | ||
774 | mrq->data = data; | ||
775 | mrq->stop = stop; | ||
776 | } | ||
777 | static int mmc_test_nonblock_transfer(struct mmc_test_card *test, | ||
778 | struct scatterlist *sg, unsigned sg_len, | ||
779 | unsigned dev_addr, unsigned blocks, | ||
780 | unsigned blksz, int write, int count) | ||
781 | { | ||
782 | struct mmc_request mrq1; | ||
783 | struct mmc_command cmd1; | ||
784 | struct mmc_command stop1; | ||
785 | struct mmc_data data1; | ||
786 | |||
787 | struct mmc_request mrq2; | ||
788 | struct mmc_command cmd2; | ||
789 | struct mmc_command stop2; | ||
790 | struct mmc_data data2; | ||
791 | |||
792 | struct mmc_test_async_req test_areq[2]; | ||
793 | struct mmc_async_req *done_areq; | ||
794 | struct mmc_async_req *cur_areq = &test_areq[0].areq; | ||
795 | struct mmc_async_req *other_areq = &test_areq[1].areq; | ||
796 | int i; | ||
797 | int ret; | ||
798 | |||
799 | test_areq[0].test = test; | ||
800 | test_areq[1].test = test; | ||
801 | |||
802 | mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); | ||
803 | mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); | ||
804 | |||
805 | cur_areq->mrq = &mrq1; | ||
806 | cur_areq->err_check = mmc_test_check_result_async; | ||
807 | other_areq->mrq = &mrq2; | ||
808 | other_areq->err_check = mmc_test_check_result_async; | ||
809 | |||
810 | for (i = 0; i < count; i++) { | ||
811 | mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, | ||
812 | blocks, blksz, write); | ||
813 | done_areq = mmc_start_req(test->card->host, cur_areq, &ret); | ||
814 | |||
815 | if (ret || (!done_areq && i > 0)) | ||
816 | goto err; | ||
817 | |||
818 | if (done_areq) { | ||
819 | if (done_areq->mrq == &mrq2) | ||
820 | mmc_test_nonblock_reset(&mrq2, &cmd2, | ||
821 | &stop2, &data2); | ||
822 | else | ||
823 | mmc_test_nonblock_reset(&mrq1, &cmd1, | ||
824 | &stop1, &data1); | ||
825 | } | ||
826 | done_areq = cur_areq; | ||
827 | cur_areq = other_areq; | ||
828 | other_areq = done_areq; | ||
829 | dev_addr += blocks; | ||
830 | } | ||
831 | |||
832 | done_areq = mmc_start_req(test->card->host, NULL, &ret); | ||
833 | |||
834 | return ret; | ||
835 | err: | ||
836 | return ret; | ||
837 | } | ||
838 | |||
839 | /* | ||
723 | * Tests a basic transfer with certain parameters | 840 | * Tests a basic transfer with certain parameters |
724 | */ | 841 | */ |
725 | static int mmc_test_simple_transfer(struct mmc_test_card *test, | 842 | static int mmc_test_simple_transfer(struct mmc_test_card *test, |
@@ -1302,7 +1419,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
  * Map sz bytes so that it can be transferred.
  */
 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
-			     int max_scatter)
+			     int max_scatter, int min_sg_len)
 {
 	struct mmc_test_area *t = &test->area;
 	int err;
@@ -1315,7 +1432,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
 					  &t->sg_len);
 	} else {
 		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
-				      t->max_seg_sz, &t->sg_len);
+				      t->max_seg_sz, &t->sg_len, min_sg_len);
 	}
 	if (err)
 		printk(KERN_INFO "%s: Failed to map sg list\n",
@@ -1336,14 +1453,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
 }
 
 /*
- * Map and transfer bytes.
+ * Map and transfer bytes for multiple transfers.
  */
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
-			    unsigned int dev_addr, int write, int max_scatter,
-			    int timed)
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+				unsigned int dev_addr, int write,
+				int max_scatter, int timed, int count,
+				bool nonblock, int min_sg_len)
 {
 	struct timespec ts1, ts2;
-	int ret;
+	int ret = 0;
+	int i;
+	struct mmc_test_area *t = &test->area;
 
 	/*
 	 * In the case of a maximally scattered transfer, the maximum transfer
@@ -1361,14 +1481,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
 		sz = max_tfr;
 	}
 
-	ret = mmc_test_area_map(test, sz, max_scatter);
+	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
 	if (ret)
 		return ret;
 
 	if (timed)
 		getnstimeofday(&ts1);
+	if (nonblock)
+		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+				 dev_addr, t->blocks, 512, write, count);
+	else
+		for (i = 0; i < count && ret == 0; i++) {
+			ret = mmc_test_area_transfer(test, dev_addr, write);
+			dev_addr += sz >> 9;
+		}
 
-	ret = mmc_test_area_transfer(test, dev_addr, write);
 	if (ret)
 		return ret;
 
@@ -1376,11 +1503,19 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
 		getnstimeofday(&ts2);
 
 	if (timed)
-		mmc_test_print_rate(test, sz, &ts1, &ts2);
+		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
 
 	return 0;
 }
 
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+			    unsigned int dev_addr, int write, int max_scatter,
+			    int timed)
+{
+	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+				    timed, 1, false, 0);
+}
+
 /*
  * Write the test area entirely.
  */
@@ -1954,6 +2089,245 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
 	return mmc_test_large_seq_perf(test, 1);
 }
 
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+				struct mmc_test_multiple_rw *tdata,
+				unsigned int reqsize, unsigned int size,
+				int min_sg_len)
+{
+	unsigned int dev_addr;
+	struct mmc_test_area *t = &test->area;
+	int ret = 0;
+
+	/* Set up test area */
+	if (size > mmc_test_capacity(test->card) / 2 * 512)
+		size = mmc_test_capacity(test->card) / 2 * 512;
+	if (reqsize > t->max_tfr)
+		reqsize = t->max_tfr;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if ((dev_addr & 0xffff0000))
+		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+	else
+		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+	if (!dev_addr)
+		goto err;
+
+	if (reqsize > size)
+		return 0;
+
+	/* prepare test area */
+	if (mmc_can_erase(test->card) &&
+	    tdata->prepare & MMC_TEST_PREP_ERASE) {
+		ret = mmc_erase(test->card, dev_addr,
+				size / 512, MMC_SECURE_ERASE_ARG);
+		if (ret)
+			ret = mmc_erase(test->card, dev_addr,
+					size / 512, MMC_ERASE_ARG);
+		if (ret)
+			goto err;
+	}
+
+	/* Run test */
+	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+				   tdata->do_write, 0, 1, size / reqsize,
+				   tdata->do_nonblock_req, min_sg_len);
+	if (ret)
+		goto err;
+
+	return ret;
+ err:
+	printk(KERN_INFO "[%s] error\n", __func__);
+	return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+				     struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+	void *pre_req = test->card->host->ops->pre_req;
+	void *post_req = test->card->host->ops->post_req;
+
+	if (rw->do_nonblock_req &&
+	    ((!pre_req && post_req) || (pre_req && !post_req))) {
+		printk(KERN_INFO "error: only one of pre/post is defined\n");
+		return -EINVAL;
+	}
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+				       struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+					   rw->sg_len[i]);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
 	{
 		.name = "Basic write (no data verification)",
@@ -2221,6 +2595,61 @@ static const struct mmc_test_case mmc_test_cases[] = {
 		.cleanup = mmc_test_area_cleanup,
 	},
 
+	{
+		.name = "Write performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
@@ -2445,6 +2874,32 @@ static const struct file_operations mmc_test_fops_test = {
 	.release	= single_release,
 };
 
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+	int i;
+
+	mutex_lock(&mmc_test_lock);
+
+	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+	mutex_unlock(&mmc_test_lock);
+
+	return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+	.open		= mtf_testlist_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static void mmc_test_free_file_test(struct mmc_card *card)
 {
 	struct mmc_test_dbgfs_file *df, *dfs;
@@ -2476,7 +2931,18 @@ static int mmc_test_register_file_test(struct mmc_card *card) | |||
2476 | 2931 | ||
2477 | if (IS_ERR_OR_NULL(file)) { | 2932 | if (IS_ERR_OR_NULL(file)) { |
2478 | dev_err(&card->dev, | 2933 | dev_err(&card->dev, |
2479 | "Can't create file. Perhaps debugfs is disabled.\n"); | 2934 | "Can't create test. Perhaps debugfs is disabled.\n"); |
2935 | ret = -ENODEV; | ||
2936 | goto err; | ||
2937 | } | ||
2938 | |||
2939 | if (card->debugfs_root) | ||
2940 | file = debugfs_create_file("testlist", S_IRUGO, | ||
2941 | card->debugfs_root, card, &mmc_test_fops_testlist); | ||
2942 | |||
2943 | if (IS_ERR_OR_NULL(file)) { | ||
2944 | dev_err(&card->dev, | ||
2945 | "Can't create testlist. Perhaps debugfs is disabled.\n"); | ||
2480 | ret = -ENODEV; | 2946 | ret = -ENODEV; |
2481 | goto err; | 2947 | goto err; |
2482 | } | 2948 | } |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 6413afa318d2..45fb362e3f01 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -52,14 +52,18 @@ static int mmc_queue_thread(void *d) | |||
52 | down(&mq->thread_sem); | 52 | down(&mq->thread_sem); |
53 | do { | 53 | do { |
54 | struct request *req = NULL; | 54 | struct request *req = NULL; |
55 | struct mmc_queue_req *tmp; | ||
55 | 56 | ||
56 | spin_lock_irq(q->queue_lock); | 57 | spin_lock_irq(q->queue_lock); |
57 | set_current_state(TASK_INTERRUPTIBLE); | 58 | set_current_state(TASK_INTERRUPTIBLE); |
58 | req = blk_fetch_request(q); | 59 | req = blk_fetch_request(q); |
59 | mq->req = req; | 60 | mq->mqrq_cur->req = req; |
60 | spin_unlock_irq(q->queue_lock); | 61 | spin_unlock_irq(q->queue_lock); |
61 | 62 | ||
62 | if (!req) { | 63 | if (req || mq->mqrq_prev->req) { |
64 | set_current_state(TASK_RUNNING); | ||
65 | mq->issue_fn(mq, req); | ||
66 | } else { | ||
63 | if (kthread_should_stop()) { | 67 | if (kthread_should_stop()) { |
64 | set_current_state(TASK_RUNNING); | 68 | set_current_state(TASK_RUNNING); |
65 | break; | 69 | break; |
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d) | |||
67 | up(&mq->thread_sem); | 71 | up(&mq->thread_sem); |
68 | schedule(); | 72 | schedule(); |
69 | down(&mq->thread_sem); | 73 | down(&mq->thread_sem); |
70 | continue; | ||
71 | } | 74 | } |
72 | set_current_state(TASK_RUNNING); | ||
73 | 75 | ||
74 | mq->issue_fn(mq, req); | 76 | /* Current request becomes previous request and vice versa. */ |
77 | mq->mqrq_prev->brq.mrq.data = NULL; | ||
78 | mq->mqrq_prev->req = NULL; | ||
79 | tmp = mq->mqrq_prev; | ||
80 | mq->mqrq_prev = mq->mqrq_cur; | ||
81 | mq->mqrq_cur = tmp; | ||
75 | } while (1); | 82 | } while (1); |
76 | up(&mq->thread_sem); | 83 | up(&mq->thread_sem); |
77 | 84 | ||
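The hunk above is the heart of the change: the thread now calls issue_fn() even when blk_fetch_request() returns NULL, as long as the previous slot still holds a request, and only then swaps mqrq_cur and mqrq_prev. A minimal, self-contained sketch of that ping-pong pattern, in plain user-space C with hypothetical names rather than kernel API:

/* pingpong.c - illustration of the cur/prev slot swap; not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct slot {
	const char *req;		/* stands in for struct request * */
};

/* Stand-in for issue_fn(): starts the new request and/or reaps the old one. */
static void issue(const struct slot *cur, const struct slot *prev)
{
	printf("issue: new=%s prev=%s\n",
	       cur->req ? cur->req : "-", prev->req ? prev->req : "-");
}

int main(void)
{
	struct slot slots[2] = { { NULL }, { NULL } };
	struct slot *cur = &slots[0], *prev = &slots[1], *tmp;
	const char *incoming[] = { "read A", "read B", NULL, "write C", NULL };

	for (size_t i = 0; i < sizeof(incoming) / sizeof(incoming[0]); i++) {
		cur->req = incoming[i];
		if (cur->req || prev->req)
			issue(cur, prev);	/* a NULL new request still reaps prev */

		/* Current request becomes previous request and vice versa. */
		prev->req = NULL;
		tmp = prev;
		prev = cur;
		cur = tmp;
	}
	return 0;
}

The point is simply that two request slots alternate roles, so the transfer being completed and the one being prepared never share descriptors or buffers.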
@@ -97,10 +104,46 @@ static void mmc_request(struct request_queue *q) | |||
97 | return; | 104 | return; |
98 | } | 105 | } |
99 | 106 | ||
100 | if (!mq->req) | 107 | if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) |
101 | wake_up_process(mq->thread); | 108 | wake_up_process(mq->thread); |
102 | } | 109 | } |
103 | 110 | ||
111 | struct scatterlist *mmc_alloc_sg(int sg_len, int *err) | ||
112 | { | ||
113 | struct scatterlist *sg; | ||
114 | |||
115 | sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL); | ||
116 | if (!sg) | ||
117 | *err = -ENOMEM; | ||
118 | else { | ||
119 | *err = 0; | ||
120 | sg_init_table(sg, sg_len); | ||
121 | } | ||
122 | |||
123 | return sg; | ||
124 | } | ||
125 | |||
126 | static void mmc_queue_setup_discard(struct request_queue *q, | ||
127 | struct mmc_card *card) | ||
128 | { | ||
129 | unsigned max_discard; | ||
130 | |||
131 | max_discard = mmc_calc_max_discard(card); | ||
132 | if (!max_discard) | ||
133 | return; | ||
134 | |||
135 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | ||
136 | q->limits.max_discard_sectors = max_discard; | ||
137 | if (card->erased_byte == 0) | ||
138 | q->limits.discard_zeroes_data = 1; | ||
139 | q->limits.discard_granularity = card->pref_erase << 9; | ||
140 | /* granularity must not be greater than max. discard */ | ||
141 | if (card->pref_erase > max_discard) | ||
142 | q->limits.discard_granularity = 0; | ||
143 | if (mmc_can_secure_erase_trim(card)) | ||
144 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); | ||
145 | } | ||
146 | |||
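One detail of mmc_queue_setup_discard() that is easy to trip over is the unit mix: max_discard_sectors is in 512-byte sectors while discard_granularity is in bytes, hence the pref_erase << 9. A small stand-alone illustration with made-up values (2048-sector preferred erase size, 8192-sector discard limit), not taken from any real card:

/* discard_units.c - illustration of the unit handling; values are invented. */
#include <stdio.h>

int main(void)
{
	unsigned int max_discard = 8192;	/* sectors, as mmc_calc_max_discard() would return */
	unsigned int pref_erase = 2048;		/* sectors, preferred erase size */
	unsigned int granularity = pref_erase << 9;	/* bytes */

	if (pref_erase > max_discard)
		granularity = 0;		/* granularity must not exceed max. discard */

	printf("max_discard_sectors=%u discard_granularity=%u bytes\n",
	       max_discard, granularity);	/* prints 8192 and 1048576 */
	return 0;
}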
104 | /** | 147 | /** |
105 | * mmc_init_queue - initialise a queue structure. | 148 | * mmc_init_queue - initialise a queue structure. |
106 | * @mq: mmc queue | 149 | * @mq: mmc queue |
@@ -116,6 +159,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
116 | struct mmc_host *host = card->host; | 159 | struct mmc_host *host = card->host; |
117 | u64 limit = BLK_BOUNCE_HIGH; | 160 | u64 limit = BLK_BOUNCE_HIGH; |
118 | int ret; | 161 | int ret; |
162 | struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; | ||
163 | struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; | ||
119 | 164 | ||
120 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) | 165 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) |
121 | limit = *mmc_dev(host)->dma_mask; | 166 | limit = *mmc_dev(host)->dma_mask; |
@@ -125,21 +170,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
125 | if (!mq->queue) | 170 | if (!mq->queue) |
126 | return -ENOMEM; | 171 | return -ENOMEM; |
127 | 172 | ||
173 | memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur)); | ||
174 | memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev)); | ||
175 | mq->mqrq_cur = mqrq_cur; | ||
176 | mq->mqrq_prev = mqrq_prev; | ||
128 | mq->queue->queuedata = mq; | 177 | mq->queue->queuedata = mq; |
129 | mq->req = NULL; | ||
130 | 178 | ||
131 | blk_queue_prep_rq(mq->queue, mmc_prep_request); | 179 | blk_queue_prep_rq(mq->queue, mmc_prep_request); |
132 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); | 180 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); |
133 | if (mmc_can_erase(card)) { | 181 | if (mmc_can_erase(card)) |
134 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); | 182 | mmc_queue_setup_discard(mq->queue, card); |
135 | mq->queue->limits.max_discard_sectors = UINT_MAX; | ||
136 | if (card->erased_byte == 0) | ||
137 | mq->queue->limits.discard_zeroes_data = 1; | ||
138 | mq->queue->limits.discard_granularity = card->pref_erase << 9; | ||
139 | if (mmc_can_secure_erase_trim(card)) | ||
140 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, | ||
141 | mq->queue); | ||
142 | } | ||
143 | 183 | ||
144 | #ifdef CONFIG_MMC_BLOCK_BOUNCE | 184 | #ifdef CONFIG_MMC_BLOCK_BOUNCE |
145 | if (host->max_segs == 1) { | 185 | if (host->max_segs == 1) { |
@@ -155,53 +195,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
155 | bouncesz = host->max_blk_count * 512; | 195 | bouncesz = host->max_blk_count * 512; |
156 | 196 | ||
157 | if (bouncesz > 512) { | 197 | if (bouncesz > 512) { |
158 | mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); | 198 | mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); |
159 | if (!mq->bounce_buf) { | 199 | if (!mqrq_cur->bounce_buf) { |
200 | printk(KERN_WARNING "%s: unable to " | ||
201 | "allocate bounce cur buffer\n", | ||
202 | mmc_card_name(card)); | ||
203 | } | ||
204 | mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); | ||
205 | if (!mqrq_prev->bounce_buf) { | ||
160 | printk(KERN_WARNING "%s: unable to " | 206 | printk(KERN_WARNING "%s: unable to " |
161 | "allocate bounce buffer\n", | 207 | "allocate bounce prev buffer\n", |
162 | mmc_card_name(card)); | 208 | mmc_card_name(card)); |
209 | kfree(mqrq_cur->bounce_buf); | ||
210 | mqrq_cur->bounce_buf = NULL; | ||
163 | } | 211 | } |
164 | } | 212 | } |
165 | 213 | ||
166 | if (mq->bounce_buf) { | 214 | if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) { |
167 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); | 215 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); |
168 | blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); | 216 | blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); |
169 | blk_queue_max_segments(mq->queue, bouncesz / 512); | 217 | blk_queue_max_segments(mq->queue, bouncesz / 512); |
170 | blk_queue_max_segment_size(mq->queue, bouncesz); | 218 | blk_queue_max_segment_size(mq->queue, bouncesz); |
171 | 219 | ||
172 | mq->sg = kmalloc(sizeof(struct scatterlist), | 220 | mqrq_cur->sg = mmc_alloc_sg(1, &ret); |
173 | GFP_KERNEL); | 221 | if (ret) |
174 | if (!mq->sg) { | ||
175 | ret = -ENOMEM; | ||
176 | goto cleanup_queue; | 222 | goto cleanup_queue; |
177 | } | ||
178 | sg_init_table(mq->sg, 1); | ||
179 | 223 | ||
180 | mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * | 224 | mqrq_cur->bounce_sg = |
181 | bouncesz / 512, GFP_KERNEL); | 225 | mmc_alloc_sg(bouncesz / 512, &ret); |
182 | if (!mq->bounce_sg) { | 226 | if (ret) |
183 | ret = -ENOMEM; | 227 | goto cleanup_queue; |
228 | |||
229 | mqrq_prev->sg = mmc_alloc_sg(1, &ret); | ||
230 | if (ret) | ||
231 | goto cleanup_queue; | ||
232 | |||
233 | mqrq_prev->bounce_sg = | ||
234 | mmc_alloc_sg(bouncesz / 512, &ret); | ||
235 | if (ret) | ||
184 | goto cleanup_queue; | 236 | goto cleanup_queue; |
185 | } | ||
186 | sg_init_table(mq->bounce_sg, bouncesz / 512); | ||
187 | } | 237 | } |
188 | } | 238 | } |
189 | #endif | 239 | #endif |
190 | 240 | ||
191 | if (!mq->bounce_buf) { | 241 | if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) { |
192 | blk_queue_bounce_limit(mq->queue, limit); | 242 | blk_queue_bounce_limit(mq->queue, limit); |
193 | blk_queue_max_hw_sectors(mq->queue, | 243 | blk_queue_max_hw_sectors(mq->queue, |
194 | min(host->max_blk_count, host->max_req_size / 512)); | 244 | min(host->max_blk_count, host->max_req_size / 512)); |
195 | blk_queue_max_segments(mq->queue, host->max_segs); | 245 | blk_queue_max_segments(mq->queue, host->max_segs); |
196 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); | 246 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); |
197 | 247 | ||
198 | mq->sg = kmalloc(sizeof(struct scatterlist) * | 248 | mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret); |
199 | host->max_segs, GFP_KERNEL); | 249 | if (ret) |
200 | if (!mq->sg) { | 250 | goto cleanup_queue; |
201 | ret = -ENOMEM; | 251 | |
252 | |||
253 | mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret); | ||
254 | if (ret) | ||
202 | goto cleanup_queue; | 255 | goto cleanup_queue; |
203 | } | ||
204 | sg_init_table(mq->sg, host->max_segs); | ||
205 | } | 256 | } |
206 | 257 | ||
207 | sema_init(&mq->thread_sem, 1); | 258 | sema_init(&mq->thread_sem, 1); |
@@ -216,16 +267,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
216 | 267 | ||
217 | return 0; | 268 | return 0; |
218 | free_bounce_sg: | 269 | free_bounce_sg: |
219 | if (mq->bounce_sg) | 270 | kfree(mqrq_cur->bounce_sg); |
220 | kfree(mq->bounce_sg); | 271 | mqrq_cur->bounce_sg = NULL; |
221 | mq->bounce_sg = NULL; | 272 | kfree(mqrq_prev->bounce_sg); |
273 | mqrq_prev->bounce_sg = NULL; | ||
274 | |||
222 | cleanup_queue: | 275 | cleanup_queue: |
223 | if (mq->sg) | 276 | kfree(mqrq_cur->sg); |
224 | kfree(mq->sg); | 277 | mqrq_cur->sg = NULL; |
225 | mq->sg = NULL; | 278 | kfree(mqrq_cur->bounce_buf); |
226 | if (mq->bounce_buf) | 279 | mqrq_cur->bounce_buf = NULL; |
227 | kfree(mq->bounce_buf); | 280 | |
228 | mq->bounce_buf = NULL; | 281 | kfree(mqrq_prev->sg); |
282 | mqrq_prev->sg = NULL; | ||
283 | kfree(mqrq_prev->bounce_buf); | ||
284 | mqrq_prev->bounce_buf = NULL; | ||
285 | |||
229 | blk_cleanup_queue(mq->queue); | 286 | blk_cleanup_queue(mq->queue); |
230 | return ret; | 287 | return ret; |
231 | } | 288 | } |
@@ -234,6 +291,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | |||
234 | { | 291 | { |
235 | struct request_queue *q = mq->queue; | 292 | struct request_queue *q = mq->queue; |
236 | unsigned long flags; | 293 | unsigned long flags; |
294 | struct mmc_queue_req *mqrq_cur = mq->mqrq_cur; | ||
295 | struct mmc_queue_req *mqrq_prev = mq->mqrq_prev; | ||
237 | 296 | ||
238 | /* Make sure the queue isn't suspended, as that will deadlock */ | 297 | /* Make sure the queue isn't suspended, as that will deadlock */ |
239 | mmc_queue_resume(mq); | 298 | mmc_queue_resume(mq); |
@@ -247,16 +306,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | |||
247 | blk_start_queue(q); | 306 | blk_start_queue(q); |
248 | spin_unlock_irqrestore(q->queue_lock, flags); | 307 | spin_unlock_irqrestore(q->queue_lock, flags); |
249 | 308 | ||
250 | if (mq->bounce_sg) | 309 | kfree(mqrq_cur->bounce_sg); |
251 | kfree(mq->bounce_sg); | 310 | mqrq_cur->bounce_sg = NULL; |
252 | mq->bounce_sg = NULL; | ||
253 | 311 | ||
254 | kfree(mq->sg); | 312 | kfree(mqrq_cur->sg); |
255 | mq->sg = NULL; | 313 | mqrq_cur->sg = NULL; |
256 | 314 | ||
257 | if (mq->bounce_buf) | 315 | kfree(mqrq_cur->bounce_buf); |
258 | kfree(mq->bounce_buf); | 316 | mqrq_cur->bounce_buf = NULL; |
259 | mq->bounce_buf = NULL; | 317 | |
318 | kfree(mqrq_prev->bounce_sg); | ||
319 | mqrq_prev->bounce_sg = NULL; | ||
320 | |||
321 | kfree(mqrq_prev->sg); | ||
322 | mqrq_prev->sg = NULL; | ||
323 | |||
324 | kfree(mqrq_prev->bounce_buf); | ||
325 | mqrq_prev->bounce_buf = NULL; | ||
260 | 326 | ||
261 | mq->card = NULL; | 327 | mq->card = NULL; |
262 | } | 328 | } |
@@ -309,27 +375,27 @@ void mmc_queue_resume(struct mmc_queue *mq) | |||
309 | /* | 375 | /* |
310 | * Prepare the sg list(s) to be handed off to the host driver | 376 | * Prepare the sg list(s) to be handed off to the host driver
311 | */ | 377 | */ |
312 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq) | 378 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) |
313 | { | 379 | { |
314 | unsigned int sg_len; | 380 | unsigned int sg_len; |
315 | size_t buflen; | 381 | size_t buflen; |
316 | struct scatterlist *sg; | 382 | struct scatterlist *sg; |
317 | int i; | 383 | int i; |
318 | 384 | ||
319 | if (!mq->bounce_buf) | 385 | if (!mqrq->bounce_buf) |
320 | return blk_rq_map_sg(mq->queue, mq->req, mq->sg); | 386 | return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); |
321 | 387 | ||
322 | BUG_ON(!mq->bounce_sg); | 388 | BUG_ON(!mqrq->bounce_sg); |
323 | 389 | ||
324 | sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg); | 390 | sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); |
325 | 391 | ||
326 | mq->bounce_sg_len = sg_len; | 392 | mqrq->bounce_sg_len = sg_len; |
327 | 393 | ||
328 | buflen = 0; | 394 | buflen = 0; |
329 | for_each_sg(mq->bounce_sg, sg, sg_len, i) | 395 | for_each_sg(mqrq->bounce_sg, sg, sg_len, i) |
330 | buflen += sg->length; | 396 | buflen += sg->length; |
331 | 397 | ||
332 | sg_init_one(mq->sg, mq->bounce_buf, buflen); | 398 | sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); |
333 | 399 | ||
334 | return 1; | 400 | return 1; |
335 | } | 401 | } |
@@ -338,31 +404,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq) | |||
338 | * If writing, bounce the data to the buffer before the request | 404 | * If writing, bounce the data to the buffer before the request |
339 | * is sent to the host driver | 405 | * is sent to the host driver |
340 | */ | 406 | */ |
341 | void mmc_queue_bounce_pre(struct mmc_queue *mq) | 407 | void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) |
342 | { | 408 | { |
343 | if (!mq->bounce_buf) | 409 | if (!mqrq->bounce_buf) |
344 | return; | 410 | return; |
345 | 411 | ||
346 | if (rq_data_dir(mq->req) != WRITE) | 412 | if (rq_data_dir(mqrq->req) != WRITE) |
347 | return; | 413 | return; |
348 | 414 | ||
349 | sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, | 415 | sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, |
350 | mq->bounce_buf, mq->sg[0].length); | 416 | mqrq->bounce_buf, mqrq->sg[0].length); |
351 | } | 417 | } |
352 | 418 | ||
353 | /* | 419 | /* |
354 | * If reading, bounce the data from the buffer after the request | 420 | * If reading, bounce the data from the buffer after the request |
355 | * has been handled by the host driver | 421 | * has been handled by the host driver |
356 | */ | 422 | */ |
357 | void mmc_queue_bounce_post(struct mmc_queue *mq) | 423 | void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) |
358 | { | 424 | { |
359 | if (!mq->bounce_buf) | 425 | if (!mqrq->bounce_buf) |
360 | return; | 426 | return; |
361 | 427 | ||
362 | if (rq_data_dir(mq->req) != READ) | 428 | if (rq_data_dir(mqrq->req) != READ) |
363 | return; | 429 | return; |
364 | 430 | ||
365 | sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, | 431 | sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, |
366 | mq->bounce_buf, mq->sg[0].length); | 432 | mqrq->bounce_buf, mqrq->sg[0].length); |
367 | } | 433 | } |
368 | |||
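For reference, the per-mqrq helpers above are meant to be called in a fixed order around each transfer. A hedged sketch of that order follows; do_issue() and wait_done() are hypothetical placeholders for the block driver's own issue and completion path, not functions from this patch:

static void example_bounced_transfer(struct mmc_queue *mq,
				     struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;

	sg_len = mmc_queue_map_sg(mq, mqrq);	/* collapses to one sg entry when bouncing */
	mmc_queue_bounce_pre(mqrq);		/* for writes: copy payload into the bounce buffer */

	do_issue(mq, mqrq, sg_len);		/* hand mqrq->brq.mrq to the host controller */
	wait_done(mqrq);

	mmc_queue_bounce_post(mqrq);		/* for reads: copy payload back out */
}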
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 6223ef8dc9cd..d2a1eb4b9f9f 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -4,19 +4,35 @@ | |||
4 | struct request; | 4 | struct request; |
5 | struct task_struct; | 5 | struct task_struct; |
6 | 6 | ||
7 | struct mmc_blk_request { | ||
8 | struct mmc_request mrq; | ||
9 | struct mmc_command sbc; | ||
10 | struct mmc_command cmd; | ||
11 | struct mmc_command stop; | ||
12 | struct mmc_data data; | ||
13 | }; | ||
14 | |||
15 | struct mmc_queue_req { | ||
16 | struct request *req; | ||
17 | struct mmc_blk_request brq; | ||
18 | struct scatterlist *sg; | ||
19 | char *bounce_buf; | ||
20 | struct scatterlist *bounce_sg; | ||
21 | unsigned int bounce_sg_len; | ||
22 | struct mmc_async_req mmc_active; | ||
23 | }; | ||
24 | |||
7 | struct mmc_queue { | 25 | struct mmc_queue { |
8 | struct mmc_card *card; | 26 | struct mmc_card *card; |
9 | struct task_struct *thread; | 27 | struct task_struct *thread; |
10 | struct semaphore thread_sem; | 28 | struct semaphore thread_sem; |
11 | unsigned int flags; | 29 | unsigned int flags; |
12 | struct request *req; | ||
13 | int (*issue_fn)(struct mmc_queue *, struct request *); | 30 | int (*issue_fn)(struct mmc_queue *, struct request *); |
14 | void *data; | 31 | void *data; |
15 | struct request_queue *queue; | 32 | struct request_queue *queue; |
16 | struct scatterlist *sg; | 33 | struct mmc_queue_req mqrq[2]; |
17 | char *bounce_buf; | 34 | struct mmc_queue_req *mqrq_cur; |
18 | struct scatterlist *bounce_sg; | 35 | struct mmc_queue_req *mqrq_prev; |
19 | unsigned int bounce_sg_len; | ||
20 | }; | 36 | }; |
21 | 37 | ||
22 | extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, | 38 | extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, |
@@ -25,8 +41,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *); | |||
25 | extern void mmc_queue_suspend(struct mmc_queue *); | 41 | extern void mmc_queue_suspend(struct mmc_queue *); |
26 | extern void mmc_queue_resume(struct mmc_queue *); | 42 | extern void mmc_queue_resume(struct mmc_queue *); |
27 | 43 | ||
28 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *); | 44 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *, |
29 | extern void mmc_queue_bounce_pre(struct mmc_queue *); | 45 | struct mmc_queue_req *); |
30 | extern void mmc_queue_bounce_post(struct mmc_queue *); | 46 | extern void mmc_queue_bounce_pre(struct mmc_queue_req *); |
47 | extern void mmc_queue_bounce_post(struct mmc_queue_req *); | ||
31 | 48 | ||
32 | #endif | 49 | #endif |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7843efe22359..f091b43d00c4 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -198,9 +198,109 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) | |||
198 | 198 | ||
199 | static void mmc_wait_done(struct mmc_request *mrq) | 199 | static void mmc_wait_done(struct mmc_request *mrq) |
200 | { | 200 | { |
201 | complete(mrq->done_data); | 201 | complete(&mrq->completion); |
202 | } | 202 | } |
203 | 203 | ||
204 | static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) | ||
205 | { | ||
206 | init_completion(&mrq->completion); | ||
207 | mrq->done = mmc_wait_done; | ||
208 | mmc_start_request(host, mrq); | ||
209 | } | ||
210 | |||
211 | static void mmc_wait_for_req_done(struct mmc_host *host, | ||
212 | struct mmc_request *mrq) | ||
213 | { | ||
214 | wait_for_completion(&mrq->completion); | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * mmc_pre_req - Prepare for a new request | ||
219 | * @host: MMC host to prepare command | ||
220 | * @mrq: MMC request to prepare for | ||
221 | * @is_first_req: true if there is no previously started request | ||
222 | * that may run in parallel to this call, otherwise false | ||
223 | * | ||
224 | * mmc_pre_req() is called prior to mmc_start_req() to let the | ||
225 | * host prepare for the new request. Preparation of a request may be | ||
226 | * performed while another request is running on the host. | ||
227 | */ | ||
228 | static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, | ||
229 | bool is_first_req) | ||
230 | { | ||
231 | if (host->ops->pre_req) | ||
232 | host->ops->pre_req(host, mrq, is_first_req); | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * mmc_post_req - Post process a completed request | ||
237 | * @host: MMC host to post process command | ||
238 | * @mrq: MMC request to post process for | ||
239 | * @err: Error; if non-zero, clean up any resources set up in pre_req | ||
240 | * | ||
241 | * Let the host post process a completed request. Post processing of | ||
242 | * a request may be performed while another request is running. | ||
243 | */ | ||
244 | static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, | ||
245 | int err) | ||
246 | { | ||
247 | if (host->ops->post_req) | ||
248 | host->ops->post_req(host, mrq, err); | ||
249 | } | ||
250 | |||
251 | /** | ||
252 | * mmc_start_req - start a non-blocking request | ||
253 | * @host: MMC host to start command | ||
254 | * @areq: async request to start | ||
255 | * @error: out parameter returns 0 for success, otherwise non zero | ||
256 | * | ||
257 | * Start a new MMC custom command request for a host. | ||
258 | * If there is an ongoing async request, wait for completion | ||
259 | * of that request, then start the new one and return. | ||
260 | * Does not wait for the new request to complete. | ||
261 | * | ||
262 | * Returns the completed request, NULL in case of none completed. | ||
263 | * Wait for an ongoing request (previously started) to complete and | ||
264 | * return the completed request. If there is no ongoing request, NULL | ||
265 | * is returned without waiting. NULL is not an error condition. | ||
266 | */ | ||
267 | struct mmc_async_req *mmc_start_req(struct mmc_host *host, | ||
268 | struct mmc_async_req *areq, int *error) | ||
269 | { | ||
270 | int err = 0; | ||
271 | struct mmc_async_req *data = host->areq; | ||
272 | |||
273 | /* Prepare a new request */ | ||
274 | if (areq) | ||
275 | mmc_pre_req(host, areq->mrq, !host->areq); | ||
276 | |||
277 | if (host->areq) { | ||
278 | mmc_wait_for_req_done(host, host->areq->mrq); | ||
279 | err = host->areq->err_check(host->card, host->areq); | ||
280 | if (err) { | ||
281 | mmc_post_req(host, host->areq->mrq, 0); | ||
282 | if (areq) | ||
283 | mmc_post_req(host, areq->mrq, -EINVAL); | ||
284 | |||
285 | host->areq = NULL; | ||
286 | goto out; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | if (areq) | ||
291 | __mmc_start_req(host, areq->mrq); | ||
292 | |||
293 | if (host->areq) | ||
294 | mmc_post_req(host, host->areq->mrq, 0); | ||
295 | |||
296 | host->areq = areq; | ||
297 | out: | ||
298 | if (error) | ||
299 | *error = err; | ||
300 | return data; | ||
301 | } | ||
302 | EXPORT_SYMBOL(mmc_start_req); | ||
303 | |||
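In practice a caller keeps one mmc_async_req in flight and feeds the next one in with every call. A hedged sketch of such a caller, assuming mmc_async_req carries an mrq plus an err_check() hook as introduced by this series; prepare_areq() and finish_areq() are hypothetical driver helpers, not part of the API:

static void example_pipeline(struct mmc_host *host, struct request *req)
{
	struct mmc_async_req *done;
	int err;

	/*
	 * Starts the new async request (if any) and returns the previously
	 * started one once it completes; NULL means nothing was pending.
	 */
	done = mmc_start_req(host, req ? prepare_areq(req) : NULL, &err);
	if (done)
		finish_areq(done, err);	/* post-process the finished transfer */
}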
204 | /** | 304 | /** |
205 | * mmc_wait_for_req - start a request and wait for completion | 305 | * mmc_wait_for_req - start a request and wait for completion |
206 | * @host: MMC host to start command | 306 | * @host: MMC host to start command |
@@ -212,16 +312,9 @@ static void mmc_wait_done(struct mmc_request *mrq) | |||
212 | */ | 312 | */ |
213 | void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) | 313 | void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) |
214 | { | 314 | { |
215 | DECLARE_COMPLETION_ONSTACK(complete); | 315 | __mmc_start_req(host, mrq); |
216 | 316 | mmc_wait_for_req_done(host, mrq); | |
217 | mrq->done_data = &complete; | ||
218 | mrq->done = mmc_wait_done; | ||
219 | |||
220 | mmc_start_request(host, mrq); | ||
221 | |||
222 | wait_for_completion(&complete); | ||
223 | } | 317 | } |
224 | |||
225 | EXPORT_SYMBOL(mmc_wait_for_req); | 318 | EXPORT_SYMBOL(mmc_wait_for_req); |
226 | 319 | ||
227 | /** | 320 | /** |
@@ -1516,6 +1609,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, | |||
1516 | } | 1609 | } |
1517 | EXPORT_SYMBOL(mmc_erase_group_aligned); | 1610 | EXPORT_SYMBOL(mmc_erase_group_aligned); |
1518 | 1611 | ||
1612 | static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, | ||
1613 | unsigned int arg) | ||
1614 | { | ||
1615 | struct mmc_host *host = card->host; | ||
1616 | unsigned int max_discard, x, y, qty = 0, max_qty, timeout; | ||
1617 | unsigned int last_timeout = 0; | ||
1618 | |||
1619 | if (card->erase_shift) | ||
1620 | max_qty = UINT_MAX >> card->erase_shift; | ||
1621 | else if (mmc_card_sd(card)) | ||
1622 | max_qty = UINT_MAX; | ||
1623 | else | ||
1624 | max_qty = UINT_MAX / card->erase_size; | ||
1625 | |||
1626 | /* Find the largest qty with an OK timeout */ | ||
1627 | do { | ||
1628 | y = 0; | ||
1629 | for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { | ||
1630 | timeout = mmc_erase_timeout(card, arg, qty + x); | ||
1631 | if (timeout > host->max_discard_to) | ||
1632 | break; | ||
1633 | if (timeout < last_timeout) | ||
1634 | break; | ||
1635 | last_timeout = timeout; | ||
1636 | y = x; | ||
1637 | } | ||
1638 | qty += y; | ||
1639 | } while (y); | ||
1640 | |||
1641 | if (!qty) | ||
1642 | return 0; | ||
1643 | |||
1644 | if (qty == 1) | ||
1645 | return 1; | ||
1646 | |||
1647 | /* Convert qty to sectors */ | ||
1648 | if (card->erase_shift) | ||
1649 | max_discard = --qty << card->erase_shift; | ||
1650 | else if (mmc_card_sd(card)) | ||
1651 | max_discard = qty; | ||
1652 | else | ||
1653 | max_discard = --qty * card->erase_size; | ||
1654 | |||
1655 | return max_discard; | ||
1656 | } | ||
1657 | |||
1658 | unsigned int mmc_calc_max_discard(struct mmc_card *card) | ||
1659 | { | ||
1660 | struct mmc_host *host = card->host; | ||
1661 | unsigned int max_discard, max_trim; | ||
1662 | |||
1663 | if (!host->max_discard_to) | ||
1664 | return UINT_MAX; | ||
1665 | |||
1666 | /* | ||
1667 | * Without erase_group_def set, MMC erase timeout depends on clock | ||
1668 | * frequency, which can change. In that case, the best choice is | ||
1669 | * just the preferred erase size. | ||
1670 | */ | ||
1671 | if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) | ||
1672 | return card->pref_erase; | ||
1673 | |||
1674 | max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); | ||
1675 | if (mmc_can_trim(card)) { | ||
1676 | max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); | ||
1677 | if (max_trim < max_discard) | ||
1678 | max_discard = max_trim; | ||
1679 | } else if (max_discard < card->erase_size) { | ||
1680 | max_discard = 0; | ||
1681 | } | ||
1682 | pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", | ||
1683 | mmc_hostname(host), max_discard, host->max_discard_to); | ||
1684 | return max_discard; | ||
1685 | } | ||
1686 | EXPORT_SYMBOL(mmc_calc_max_discard); | ||
1687 | |||
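The search in mmc_do_calc_max_discard() is easy to misread: starting from the quantity found so far, it probes power-of-two increments and keeps the largest one whose erase timeout still fits the host's max_discard_to budget, repeating until no increment fits. A self-contained toy with an invented linear timeout model (2 ms per erase unit against a 250 ms budget) shows the same convergence:

/* discard_search.c - toy version of the doubling search; timeout model is made up. */
#include <stdio.h>

static unsigned int erase_timeout_ms(unsigned int qty)
{
	return 2 * qty;			/* hypothetical: 2 ms per erase unit */
}

int main(void)
{
	const unsigned int budget_ms = 250;
	unsigned int qty = 0, x, y;

	/* Grow qty by the largest power-of-two step that still meets the budget. */
	do {
		y = 0;
		for (x = 1; x; x <<= 1) {
			if (erase_timeout_ms(qty + x) > budget_ms)
				break;
			y = x;
		}
		qty += y;
	} while (y);

	printf("max erase units within %u ms: %u\n", budget_ms, qty);	/* 125 */
	return 0;
}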
1519 | int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) | 1688 | int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) |
1520 | { | 1689 | { |
1521 | struct mmc_command cmd = {0}; | 1690 | struct mmc_command cmd = {0}; |
@@ -1663,6 +1832,10 @@ int mmc_power_save_host(struct mmc_host *host) | |||
1663 | { | 1832 | { |
1664 | int ret = 0; | 1833 | int ret = 0; |
1665 | 1834 | ||
1835 | #ifdef CONFIG_MMC_DEBUG | ||
1836 | pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); | ||
1837 | #endif | ||
1838 | |||
1666 | mmc_bus_get(host); | 1839 | mmc_bus_get(host); |
1667 | 1840 | ||
1668 | if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { | 1841 | if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { |
@@ -1685,6 +1858,10 @@ int mmc_power_restore_host(struct mmc_host *host) | |||
1685 | { | 1858 | { |
1686 | int ret; | 1859 | int ret; |
1687 | 1860 | ||
1861 | #ifdef CONFIG_MMC_DEBUG | ||
1862 | pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); | ||
1863 | #endif | ||
1864 | |||
1688 | mmc_bus_get(host); | 1865 | mmc_bus_get(host); |
1689 | 1866 | ||
1690 | if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { | 1867 | if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { |
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index ff2774128aa9..633975ff2bb3 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -409,52 +409,62 @@ out: | |||
409 | 409 | ||
410 | static int sd_select_driver_type(struct mmc_card *card, u8 *status) | 410 | static int sd_select_driver_type(struct mmc_card *card, u8 *status) |
411 | { | 411 | { |
412 | int host_drv_type = 0, card_drv_type = 0; | 412 | int host_drv_type = SD_DRIVER_TYPE_B; |
413 | int card_drv_type = SD_DRIVER_TYPE_B; | ||
414 | int drive_strength; | ||
413 | int err; | 415 | int err; |
414 | 416 | ||
415 | /* | 417 | /* |
416 | * If the host doesn't support any of the Driver Types A,C or D, | 418 | * If the host doesn't support any of the Driver Types A,C or D, |
417 | * default Driver Type B is used. | 419 | * or there is no board specific handler then default Driver |
420 | * Type B is used. | ||
418 | */ | 421 | */ |
419 | if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C | 422 | if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C |
420 | | MMC_CAP_DRIVER_TYPE_D))) | 423 | | MMC_CAP_DRIVER_TYPE_D))) |
421 | return 0; | 424 | return 0; |
422 | 425 | ||
423 | if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) { | 426 | if (!card->host->ops->select_drive_strength) |
424 | host_drv_type = MMC_SET_DRIVER_TYPE_A; | 427 | return 0; |
425 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) | 428 | |
426 | card_drv_type = MMC_SET_DRIVER_TYPE_A; | 429 | if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) |
427 | else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) | 430 | host_drv_type |= SD_DRIVER_TYPE_A; |
428 | card_drv_type = MMC_SET_DRIVER_TYPE_B; | 431 | |
429 | else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) | 432 | if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) |
430 | card_drv_type = MMC_SET_DRIVER_TYPE_C; | 433 | host_drv_type |= SD_DRIVER_TYPE_C; |
431 | } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) { | 434 | |
432 | host_drv_type = MMC_SET_DRIVER_TYPE_C; | 435 | if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) |
433 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) | 436 | host_drv_type |= SD_DRIVER_TYPE_D; |
434 | card_drv_type = MMC_SET_DRIVER_TYPE_C; | 437 | |
435 | } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) { | 438 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) |
436 | /* | 439 | card_drv_type |= SD_DRIVER_TYPE_A; |
437 | * If we are here, that means only the default driver type | 440 | |
438 | * B is supported by the host. | 441 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) |
439 | */ | 442 | card_drv_type |= SD_DRIVER_TYPE_C; |
440 | host_drv_type = MMC_SET_DRIVER_TYPE_B; | 443 | |
441 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) | 444 | if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) |
442 | card_drv_type = MMC_SET_DRIVER_TYPE_B; | 445 | card_drv_type |= SD_DRIVER_TYPE_D; |
443 | else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) | 446 | |
444 | card_drv_type = MMC_SET_DRIVER_TYPE_C; | 447 | /* |
445 | } | 448 | * The drive strength that the hardware can support |
449 | * depends on the board design. Pass the appropriate | ||
450 | * information and let the hardware-specific code | ||
451 | * return what is possible given the options. | ||
452 | */ | ||
453 | drive_strength = card->host->ops->select_drive_strength( | ||
454 | card->sw_caps.uhs_max_dtr, | ||
455 | host_drv_type, card_drv_type); | ||
446 | 456 | ||
447 | err = mmc_sd_switch(card, 1, 2, card_drv_type, status); | 457 | err = mmc_sd_switch(card, 1, 2, drive_strength, status); |
448 | if (err) | 458 | if (err) |
449 | return err; | 459 | return err; |
450 | 460 | ||
451 | if ((status[15] & 0xF) != card_drv_type) { | 461 | if ((status[15] & 0xF) != drive_strength) { |
452 | printk(KERN_WARNING "%s: Problem setting driver strength!\n", | 462 | printk(KERN_WARNING "%s: Problem setting drive strength!\n", |
453 | mmc_hostname(card->host)); | 463 | mmc_hostname(card->host)); |
454 | return 0; | 464 | return 0; |
455 | } | 465 | } |
456 | 466 | ||
457 | mmc_set_driver_type(card->host, host_drv_type); | 467 | mmc_set_driver_type(card->host, drive_strength); |
458 | 468 | ||
459 | return 0; | 469 | return 0; |
460 | } | 470 | } |
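For the board hook the rewritten function now depends on, here is a hedged sketch of what a host driver's ->select_drive_strength() might look like: it receives the card's maximum UHS data rate plus SD_DRIVER_TYPE_* capability masks for host and card, and returns the MMC_SET_DRIVER_TYPE_* value to program. The preference rule is invented purely for illustration; a real board would base it on signal-integrity data:

static int example_select_drive_strength(unsigned int max_dtr,
					 int host_drv, int card_drv)
{
	/* Prefer the stronger Type A drive above 100 MHz if both ends allow it. */
	if (max_dtr > 100000000 && (host_drv & card_drv & SD_DRIVER_TYPE_A))
		return MMC_SET_DRIVER_TYPE_A;

	/* Otherwise stay on the mandatory default, Type B. */
	return MMC_SET_DRIVER_TYPE_B;
}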
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index d2565df8a7fb..e4e6822d09e3 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
@@ -167,11 +167,8 @@ static int sdio_bus_remove(struct device *dev) | |||
167 | int ret = 0; | 167 | int ret = 0; |
168 | 168 | ||
169 | /* Make sure card is powered before invoking ->remove() */ | 169 | /* Make sure card is powered before invoking ->remove() */ |
170 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { | 170 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
171 | ret = pm_runtime_get_sync(dev); | 171 | pm_runtime_get_sync(dev); |
172 | if (ret < 0) | ||
173 | goto out; | ||
174 | } | ||
175 | 172 | ||
176 | drv->remove(func); | 173 | drv->remove(func); |
177 | 174 | ||
@@ -191,7 +188,6 @@ static int sdio_bus_remove(struct device *dev) | |||
191 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) | 188 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
192 | pm_runtime_put_sync(dev); | 189 | pm_runtime_put_sync(dev); |
193 | 190 | ||
194 | out: | ||
195 | return ret; | 191 | return ret; |
196 | } | 192 | } |
197 | 193 | ||
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 56dbf3f6ad08..8c87096531e9 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -81,28 +81,32 @@ config MMC_RICOH_MMC | |||
81 | 81 | ||
82 | If unsure, say Y. | 82 | If unsure, say Y. |
83 | 83 | ||
84 | config MMC_SDHCI_OF | 84 | config MMC_SDHCI_PLTFM |
85 | tristate "SDHCI support on OpenFirmware platforms" | 85 | tristate "SDHCI platform and OF driver helper" |
86 | depends on MMC_SDHCI && OF | 86 | depends on MMC_SDHCI |
87 | help | 87 | help |
88 | This selects the OF support for Secure Digital Host Controller | 88 | This selects the common helper functions support for Secure Digital |
89 | Interfaces. | 89 | Host Controller Interface based platform and OF drivers. |
90 | |||
91 | If you have a controller with this interface, say Y or M here. | ||
90 | 92 | ||
91 | If unsure, say N. | 93 | If unsure, say N. |
92 | 94 | ||
93 | config MMC_SDHCI_OF_ESDHC | 95 | config MMC_SDHCI_OF_ESDHC |
94 | bool "SDHCI OF support for the Freescale eSDHC controller" | 96 | tristate "SDHCI OF support for the Freescale eSDHC controller" |
95 | depends on MMC_SDHCI_OF | 97 | depends on MMC_SDHCI_PLTFM |
96 | depends on PPC_OF | 98 | depends on PPC_OF |
97 | select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER | 99 | select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER |
98 | help | 100 | help |
99 | This selects the Freescale eSDHC controller support. | 101 | This selects the Freescale eSDHC controller support. |
100 | 102 | ||
103 | If you have a controller with this interface, say Y or M here. | ||
104 | |||
101 | If unsure, say N. | 105 | If unsure, say N. |
102 | 106 | ||
103 | config MMC_SDHCI_OF_HLWD | 107 | config MMC_SDHCI_OF_HLWD |
104 | bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" | 108 | tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" |
105 | depends on MMC_SDHCI_OF | 109 | depends on MMC_SDHCI_PLTFM |
106 | depends on PPC_OF | 110 | depends on PPC_OF |
107 | select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER | 111 | select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER |
108 | help | 112 | help |
@@ -110,40 +114,36 @@ config MMC_SDHCI_OF_HLWD | |||
110 | found in the "Hollywood" chipset of the Nintendo Wii video game | 114 | found in the "Hollywood" chipset of the Nintendo Wii video game |
111 | console. | 115 | console. |
112 | 116 | ||
113 | If unsure, say N. | ||
114 | |||
115 | config MMC_SDHCI_PLTFM | ||
116 | tristate "SDHCI support on the platform specific bus" | ||
117 | depends on MMC_SDHCI | ||
118 | help | ||
119 | This selects the platform specific bus support for Secure Digital Host | ||
120 | Controller Interface. | ||
121 | |||
122 | If you have a controller with this interface, say Y or M here. | 117 | If you have a controller with this interface, say Y or M here. |
123 | 118 | ||
124 | If unsure, say N. | 119 | If unsure, say N. |
125 | 120 | ||
126 | config MMC_SDHCI_CNS3XXX | 121 | config MMC_SDHCI_CNS3XXX |
127 | bool "SDHCI support on the Cavium Networks CNS3xxx SoC" | 122 | tristate "SDHCI support on the Cavium Networks CNS3xxx SoC" |
128 | depends on ARCH_CNS3XXX | 123 | depends on ARCH_CNS3XXX |
129 | depends on MMC_SDHCI_PLTFM | 124 | depends on MMC_SDHCI_PLTFM |
130 | help | 125 | help |
131 | This selects the SDHCI support for CNS3xxx System-on-Chip devices. | 126 | This selects the SDHCI support for CNS3xxx System-on-Chip devices. |
132 | 127 | ||
128 | If you have a controller with this interface, say Y or M here. | ||
129 | |||
133 | If unsure, say N. | 130 | If unsure, say N. |
134 | 131 | ||
135 | config MMC_SDHCI_ESDHC_IMX | 132 | config MMC_SDHCI_ESDHC_IMX |
136 | bool "SDHCI platform support for the Freescale eSDHC i.MX controller" | 133 | tristate "SDHCI platform support for the Freescale eSDHC i.MX controller" |
137 | depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5) | 134 | depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5 |
135 | depends on MMC_SDHCI_PLTFM | ||
138 | select MMC_SDHCI_IO_ACCESSORS | 136 | select MMC_SDHCI_IO_ACCESSORS |
139 | help | 137 | help |
140 | This selects the Freescale eSDHC controller support on the platform | 138 | This selects the Freescale eSDHC controller support on the platform |
141 | bus, found on platforms like mx35/51. | 139 | bus, found on platforms like mx35/51. |
142 | 140 | ||
141 | If you have a controller with this interface, say Y or M here. | ||
142 | |||
143 | If unsure, say N. | 143 | If unsure, say N. |
144 | 144 | ||
145 | config MMC_SDHCI_DOVE | 145 | config MMC_SDHCI_DOVE |
146 | bool "SDHCI support on Marvell's Dove SoC" | 146 | tristate "SDHCI support on Marvell's Dove SoC" |
147 | depends on ARCH_DOVE | 147 | depends on ARCH_DOVE |
148 | depends on MMC_SDHCI_PLTFM | 148 | depends on MMC_SDHCI_PLTFM |
149 | select MMC_SDHCI_IO_ACCESSORS | 149 | select MMC_SDHCI_IO_ACCESSORS |
@@ -151,11 +151,14 @@ config MMC_SDHCI_DOVE | |||
151 | This selects the Secure Digital Host Controller Interface in | 151 | This selects the Secure Digital Host Controller Interface in |
152 | Marvell's Dove SoC. | 152 | Marvell's Dove SoC. |
153 | 153 | ||
154 | If you have a controller with this interface, say Y or M here. | ||
155 | |||
154 | If unsure, say N. | 156 | If unsure, say N. |
155 | 157 | ||
156 | config MMC_SDHCI_TEGRA | 158 | config MMC_SDHCI_TEGRA |
157 | bool "SDHCI platform support for the Tegra SD/MMC Controller" | 159 | tristate "SDHCI platform support for the Tegra SD/MMC Controller" |
158 | depends on MMC_SDHCI_PLTFM && ARCH_TEGRA | 160 | depends on ARCH_TEGRA |
161 | depends on MMC_SDHCI_PLTFM | ||
159 | select MMC_SDHCI_IO_ACCESSORS | 162 | select MMC_SDHCI_IO_ACCESSORS |
160 | help | 163 | help |
161 | This selects the Tegra SD/MMC controller. If you have a Tegra | 164 | This selects the Tegra SD/MMC controller. If you have a Tegra |
@@ -178,14 +181,28 @@ config MMC_SDHCI_S3C | |||
178 | 181 | ||
179 | If unsure, say N. | 182 | If unsure, say N. |
180 | 183 | ||
181 | config MMC_SDHCI_PXA | 184 | config MMC_SDHCI_PXAV3 |
182 | tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support" | 185 | tristate "Marvell MMP2 SD Host Controller support (PXAV3)" |
183 | depends on ARCH_PXA || ARCH_MMP | 186 | depends on CLKDEV_LOOKUP |
184 | select MMC_SDHCI | 187 | select MMC_SDHCI |
185 | select MMC_SDHCI_IO_ACCESSORS | 188 | select MMC_SDHCI_PLTFM |
189 | default CPU_MMP2 | ||
190 | help | ||
191 | This selects the Marvell(R) PXAV3 SD Host Controller. | ||
192 | If you have a MMP2 platform with SD Host Controller | ||
193 | and a card slot, say Y or M here. | ||
194 | |||
195 | If unsure, say N. | ||
196 | |||
197 | config MMC_SDHCI_PXAV2 | ||
198 | tristate "Marvell PXA9XX SD Host Controller support (PXAV2)" | ||
199 | depends on CLKDEV_LOOKUP | ||
200 | select MMC_SDHCI | ||
201 | select MMC_SDHCI_PLTFM | ||
202 | default CPU_PXA910 | ||
186 | help | 203 | help |
187 | This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller. | 204 | This selects the Marvell(R) PXAV2 SD Host Controller. |
188 | If you have a PXA168/PXA910/MMP2 platform with SD Host Controller | 205 | If you have a PXA9XX platform with SD Host Controller |
189 | and a card slot, say Y or M here. | 206 | and a card slot, say Y or M here. |
190 | 207 | ||
191 | If unsure, say N. | 208 | If unsure, say N. |
@@ -281,13 +298,12 @@ config MMC_ATMELMCI | |||
281 | endchoice | 298 | endchoice |
282 | 299 | ||
283 | config MMC_ATMELMCI_DMA | 300 | config MMC_ATMELMCI_DMA |
284 | bool "Atmel MCI DMA support (EXPERIMENTAL)" | 301 | bool "Atmel MCI DMA support" |
285 | depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL | 302 | depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE |
286 | help | 303 | help |
287 | Say Y here to have the Atmel MCI driver use a DMA engine to | 304 | Say Y here to have the Atmel MCI driver use a DMA engine to |
288 | do data transfers and thus increase the throughput and | 305 | do data transfers and thus increase the throughput and |
289 | reduce the CPU utilization. Note that this is highly | 306 | reduce the CPU utilization. |
290 | experimental and may cause the driver to lock up. | ||
291 | 307 | ||
292 | If unsure, say N. | 308 | If unsure, say N. |
293 | 309 | ||
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 58a5cf73d6e9..b4b83f302e32 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -9,7 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o | |||
9 | obj-$(CONFIG_MMC_MXS) += mxs-mmc.o | 9 | obj-$(CONFIG_MMC_MXS) += mxs-mmc.o |
10 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o | 10 | obj-$(CONFIG_MMC_SDHCI) += sdhci.o |
11 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o | 11 | obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o |
12 | obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o | 12 | obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o |
13 | obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o | ||
13 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o | 14 | obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o |
14 | obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o | 15 | obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o |
15 | obj-$(CONFIG_MMC_WBSD) += wbsd.o | 16 | obj-$(CONFIG_MMC_WBSD) += wbsd.o |
@@ -31,9 +32,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o | |||
31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o | 32 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o |
32 | obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o | 33 | obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o |
33 | tmio_mmc_core-y := tmio_mmc_pio.o | 34 | tmio_mmc_core-y := tmio_mmc_pio.o |
34 | ifneq ($(CONFIG_MMC_SDHI),n) | 35 | tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o |
35 | tmio_mmc_core-y += tmio_mmc_dma.o | ||
36 | endif | ||
37 | obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o | 36 | obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o |
38 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | 37 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o |
39 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | 38 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o |
@@ -44,17 +43,13 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o | |||
44 | obj-$(CONFIG_MMC_VUB300) += vub300.o | 43 | obj-$(CONFIG_MMC_VUB300) += vub300.o |
45 | obj-$(CONFIG_MMC_USHC) += ushc.o | 44 | obj-$(CONFIG_MMC_USHC) += ushc.o |
46 | 45 | ||
47 | obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o | 46 | obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o |
48 | sdhci-platform-y := sdhci-pltfm.o | 47 | obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o |
49 | sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o | 48 | obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o |
50 | sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o | 49 | obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o |
51 | sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o | 50 | obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o |
52 | sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o | 51 | obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o |
53 | 52 | obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o | |
54 | obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o | ||
55 | sdhci-of-y := sdhci-of-core.o | ||
56 | sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o | ||
57 | sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o | ||
58 | 53 | ||
59 | ifeq ($(CONFIG_CB710_DEBUG),y) | 54 | ifeq ($(CONFIG_CB710_DEBUG),y) |
60 | CFLAGS-cb710-mmc += -DDEBUG | 55 | CFLAGS-cb710-mmc += -DDEBUG |
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index d3e6a962f423..a4aa3af86fed 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c | |||
@@ -77,7 +77,8 @@ | |||
77 | 77 | ||
78 | #include <mach/board.h> | 78 | #include <mach/board.h> |
79 | #include <mach/cpu.h> | 79 | #include <mach/cpu.h> |
80 | #include <mach/at91_mci.h> | 80 | |
81 | #include "at91_mci.h" | ||
81 | 82 | ||
82 | #define DRIVER_NAME "at91_mci" | 83 | #define DRIVER_NAME "at91_mci" |
83 | 84 | ||
diff --git a/drivers/mmc/host/at91_mci.h b/drivers/mmc/host/at91_mci.h new file mode 100644 index 000000000000..eec3a6b1c2bc --- /dev/null +++ b/drivers/mmc/host/at91_mci.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * drivers/mmc/host/at91_mci.h | ||
3 | * | ||
4 | * Copyright (C) 2005 Ivan Kokshaysky | ||
5 | * Copyright (C) SAN People | ||
6 | * | ||
7 | * MultiMedia Card Interface (MCI) registers. | ||
8 | * Based on AT91RM9200 datasheet revision F. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef AT91_MCI_H | ||
17 | #define AT91_MCI_H | ||
18 | |||
19 | #define AT91_MCI_CR 0x00 /* Control Register */ | ||
20 | #define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */ | ||
21 | #define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */ | ||
22 | #define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */ | ||
23 | #define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */ | ||
24 | #define AT91_MCI_SWRST (1 << 7) /* Software Reset */ | ||
25 | |||
26 | #define AT91_MCI_MR 0x04 /* Mode Register */ | ||
27 | #define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */ | ||
28 | #define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */ | ||
29 | #define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */ | ||
30 | #define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */ | ||
31 | #define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */ | ||
32 | #define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */ | ||
33 | #define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */ | ||
34 | #define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */ | ||
35 | |||
36 | #define AT91_MCI_DTOR 0x08 /* Data Timeout Register */ | ||
37 | #define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */ | ||
38 | #define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */ | ||
39 | #define AT91_MCI_DTOMUL_1 (0 << 4) | ||
40 | #define AT91_MCI_DTOMUL_16 (1 << 4) | ||
41 | #define AT91_MCI_DTOMUL_128 (2 << 4) | ||
42 | #define AT91_MCI_DTOMUL_256 (3 << 4) | ||
43 | #define AT91_MCI_DTOMUL_1K (4 << 4) | ||
44 | #define AT91_MCI_DTOMUL_4K (5 << 4) | ||
45 | #define AT91_MCI_DTOMUL_64K (6 << 4) | ||
46 | #define AT91_MCI_DTOMUL_1M (7 << 4) | ||
47 | |||
48 | #define AT91_MCI_SDCR 0x0c /* SD Card Register */ | ||
49 | #define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */ | ||
50 | #define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */ | ||
51 | |||
52 | #define AT91_MCI_ARGR 0x10 /* Argument Register */ | ||
53 | |||
54 | #define AT91_MCI_CMDR 0x14 /* Command Register */ | ||
55 | #define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */ | ||
56 | #define AT91_MCI_RSPTYP (3 << 6) /* Response Type */ | ||
57 | #define AT91_MCI_RSPTYP_NONE (0 << 6) | ||
58 | #define AT91_MCI_RSPTYP_48 (1 << 6) | ||
59 | #define AT91_MCI_RSPTYP_136 (2 << 6) | ||
60 | #define AT91_MCI_SPCMD (7 << 8) /* Special Command */ | ||
61 | #define AT91_MCI_SPCMD_NONE (0 << 8) | ||
62 | #define AT91_MCI_SPCMD_INIT (1 << 8) | ||
63 | #define AT91_MCI_SPCMD_SYNC (2 << 8) | ||
64 | #define AT91_MCI_SPCMD_ICMD (4 << 8) | ||
65 | #define AT91_MCI_SPCMD_IRESP (5 << 8) | ||
66 | #define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */ | ||
67 | #define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */ | ||
68 | #define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */ | ||
69 | #define AT91_MCI_TRCMD_NONE (0 << 16) | ||
70 | #define AT91_MCI_TRCMD_START (1 << 16) | ||
71 | #define AT91_MCI_TRCMD_STOP (2 << 16) | ||
72 | #define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */ | ||
73 | #define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */ | ||
74 | #define AT91_MCI_TRTYP_BLOCK (0 << 19) | ||
75 | #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) | ||
76 | #define AT91_MCI_TRTYP_STREAM (2 << 19) | ||
77 | #define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) | ||
78 | #define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) | ||
79 | |||
80 | #define AT91_MCI_BLKR 0x18 /* Block Register */ | ||
81 | #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ | ||
82 | #define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */ | ||
83 | |||
84 | #define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */ | ||
85 | #define AT91_MCR_RDR 0x30 /* Receive Data Register */ | ||
86 | #define AT91_MCR_TDR 0x34 /* Transmit Data Register */ | ||
87 | |||
88 | #define AT91_MCI_SR 0x40 /* Status Register */ | ||
89 | #define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */ | ||
90 | #define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */ | ||
91 | #define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */ | ||
92 | #define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */ | ||
93 | #define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */ | ||
94 | #define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */ | ||
95 | #define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */ | ||
96 | #define AT91_MCI_ENDTX (1 << 7) /* End of TX Buffer */ | ||
97 | #define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */ | ||
98 | #define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */ | ||
99 | #define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */ | ||
100 | #define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */ | ||
101 | #define AT91_MCI_RINDE (1 << 16) /* Response Index Error */ | ||
102 | #define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */ | ||
103 | #define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */ | ||
104 | #define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */ | ||
105 | #define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */ | ||
106 | #define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */ | ||
107 | #define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */ | ||
108 | #define AT91_MCI_OVRE (1 << 30) /* Overrun */ | ||
109 | #define AT91_MCI_UNRE (1 << 31) /* Underrun */ | ||
110 | |||
111 | #define AT91_MCI_IER 0x44 /* Interrupt Enable Register */ | ||
112 | #define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */ | ||
113 | #define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */ | ||
114 | |||
115 | #endif | ||
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index aa8039f473c4..fa8cae1d7005 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -203,6 +203,7 @@ struct atmel_mci_slot { | |||
203 | #define ATMCI_CARD_PRESENT 0 | 203 | #define ATMCI_CARD_PRESENT 0 |
204 | #define ATMCI_CARD_NEED_INIT 1 | 204 | #define ATMCI_CARD_NEED_INIT 1 |
205 | #define ATMCI_SHUTDOWN 2 | 205 | #define ATMCI_SHUTDOWN 2 |
206 | #define ATMCI_SUSPENDED 3 | ||
206 | 207 | ||
207 | int detect_pin; | 208 | int detect_pin; |
208 | int wp_pin; | 209 | int wp_pin; |
@@ -1878,10 +1879,72 @@ static int __exit atmci_remove(struct platform_device *pdev) | |||
1878 | return 0; | 1879 | return 0; |
1879 | } | 1880 | } |
1880 | 1881 | ||
1882 | #ifdef CONFIG_PM | ||
1883 | static int atmci_suspend(struct device *dev) | ||
1884 | { | ||
1885 | struct atmel_mci *host = dev_get_drvdata(dev); | ||
1886 | int i; | ||
1887 | |||
1888 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | ||
1889 | struct atmel_mci_slot *slot = host->slot[i]; | ||
1890 | int ret; | ||
1891 | |||
1892 | if (!slot) | ||
1893 | continue; | ||
1894 | ret = mmc_suspend_host(slot->mmc); | ||
1895 | if (ret < 0) { | ||
1896 | while (--i >= 0) { | ||
1897 | slot = host->slot[i]; | ||
1898 | if (slot | ||
1899 | && test_bit(ATMCI_SUSPENDED, &slot->flags)) { | ||
1900 | mmc_resume_host(host->slot[i]->mmc); | ||
1901 | clear_bit(ATMCI_SUSPENDED, &slot->flags); | ||
1902 | } | ||
1903 | } | ||
1904 | return ret; | ||
1905 | } else { | ||
1906 | set_bit(ATMCI_SUSPENDED, &slot->flags); | ||
1907 | } | ||
1908 | } | ||
1909 | |||
1910 | return 0; | ||
1911 | } | ||
1912 | |||
1913 | static int atmci_resume(struct device *dev) | ||
1914 | { | ||
1915 | struct atmel_mci *host = dev_get_drvdata(dev); | ||
1916 | int i; | ||
1917 | int ret = 0; | ||
1918 | |||
1919 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | ||
1920 | struct atmel_mci_slot *slot = host->slot[i]; | ||
1921 | int err; | ||
1922 | |||
1923 | slot = host->slot[i]; | ||
1924 | if (!slot) | ||
1925 | continue; | ||
1926 | if (!test_bit(ATMCI_SUSPENDED, &slot->flags)) | ||
1927 | continue; | ||
1928 | err = mmc_resume_host(slot->mmc); | ||
1929 | if (err < 0) | ||
1930 | ret = err; | ||
1931 | else | ||
1932 | clear_bit(ATMCI_SUSPENDED, &slot->flags); | ||
1933 | } | ||
1934 | |||
1935 | return ret; | ||
1936 | } | ||
1937 | static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume); | ||
1938 | #define ATMCI_PM_OPS (&atmci_pm) | ||
1939 | #else | ||
1940 | #define ATMCI_PM_OPS NULL | ||
1941 | #endif | ||
1942 | |||
1881 | static struct platform_driver atmci_driver = { | 1943 | static struct platform_driver atmci_driver = { |
1882 | .remove = __exit_p(atmci_remove), | 1944 | .remove = __exit_p(atmci_remove), |
1883 | .driver = { | 1945 | .driver = { |
1884 | .name = "atmel_mci", | 1946 | .name = "atmel_mci", |
1947 | .pm = ATMCI_PM_OPS, | ||
1885 | }, | 1948 | }, |
1886 | }; | 1949 | }; |
1887 | 1950 | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 66dcddb9c205..0c839d3338db 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/mmc/dw_mmc.h> | 33 | #include <linux/mmc/dw_mmc.h> |
34 | #include <linux/bitops.h> | 34 | #include <linux/bitops.h> |
35 | #include <linux/regulator/consumer.h> | 35 | #include <linux/regulator/consumer.h> |
36 | #include <linux/workqueue.h> | ||
36 | 37 | ||
37 | #include "dw_mmc.h" | 38 | #include "dw_mmc.h" |
38 | 39 | ||
@@ -100,6 +101,8 @@ struct dw_mci_slot { | |||
100 | int last_detect_state; | 101 | int last_detect_state; |
101 | }; | 102 | }; |
102 | 103 | ||
104 | static struct workqueue_struct *dw_mci_card_workqueue; | ||
105 | |||
103 | #if defined(CONFIG_DEBUG_FS) | 106 | #if defined(CONFIG_DEBUG_FS) |
104 | static int dw_mci_req_show(struct seq_file *s, void *v) | 107 | static int dw_mci_req_show(struct seq_file *s, void *v) |
105 | { | 108 | { |
@@ -284,7 +287,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) | |||
284 | /* DMA interface functions */ | 287 | /* DMA interface functions */ |
285 | static void dw_mci_stop_dma(struct dw_mci *host) | 288 | static void dw_mci_stop_dma(struct dw_mci *host) |
286 | { | 289 | { |
287 | if (host->use_dma) { | 290 | if (host->using_dma) { |
288 | host->dma_ops->stop(host); | 291 | host->dma_ops->stop(host); |
289 | host->dma_ops->cleanup(host); | 292 | host->dma_ops->cleanup(host); |
290 | } else { | 293 | } else { |
@@ -432,6 +435,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) | |||
432 | unsigned int i, direction, sg_len; | 435 | unsigned int i, direction, sg_len; |
433 | u32 temp; | 436 | u32 temp; |
434 | 437 | ||
438 | host->using_dma = 0; | ||
439 | |||
435 | /* If we don't have a channel, we can't do DMA */ | 440 | /* If we don't have a channel, we can't do DMA */ |
436 | if (!host->use_dma) | 441 | if (!host->use_dma) |
437 | return -ENODEV; | 442 | return -ENODEV; |
@@ -451,6 +456,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) | |||
451 | return -EINVAL; | 456 | return -EINVAL; |
452 | } | 457 | } |
453 | 458 | ||
459 | host->using_dma = 1; | ||
460 | |||
454 | if (data->flags & MMC_DATA_READ) | 461 | if (data->flags & MMC_DATA_READ) |
455 | direction = DMA_FROM_DEVICE; | 462 | direction = DMA_FROM_DEVICE; |
456 | else | 463 | else |
@@ -489,14 +496,18 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) | |||
489 | host->sg = NULL; | 496 | host->sg = NULL; |
490 | host->data = data; | 497 | host->data = data; |
491 | 498 | ||
499 | if (data->flags & MMC_DATA_READ) | ||
500 | host->dir_status = DW_MCI_RECV_STATUS; | ||
501 | else | ||
502 | host->dir_status = DW_MCI_SEND_STATUS; | ||
503 | |||
492 | if (dw_mci_submit_data_dma(host, data)) { | 504 | if (dw_mci_submit_data_dma(host, data)) { |
493 | host->sg = data->sg; | 505 | host->sg = data->sg; |
494 | host->pio_offset = 0; | 506 | host->pio_offset = 0; |
495 | if (data->flags & MMC_DATA_READ) | 507 | host->part_buf_start = 0; |
496 | host->dir_status = DW_MCI_RECV_STATUS; | 508 | host->part_buf_count = 0; |
497 | else | ||
498 | host->dir_status = DW_MCI_SEND_STATUS; | ||
499 | 509 | ||
510 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); | ||
500 | temp = mci_readl(host, INTMASK); | 511 | temp = mci_readl(host, INTMASK); |
501 | temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; | 512 | temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; |
502 | mci_writel(host, INTMASK, temp); | 513 | mci_writel(host, INTMASK, temp); |
@@ -574,7 +585,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) | |||
574 | } | 585 | } |
575 | 586 | ||
576 | /* Set the current slot bus width */ | 587 | /* Set the current slot bus width */ |
577 | mci_writel(host, CTYPE, slot->ctype); | 588 | mci_writel(host, CTYPE, (slot->ctype << slot->id)); |
578 | } | 589 | } |
579 | 590 | ||
580 | static void dw_mci_start_request(struct dw_mci *host, | 591 | static void dw_mci_start_request(struct dw_mci *host, |
@@ -624,13 +635,13 @@ static void dw_mci_start_request(struct dw_mci *host, | |||
624 | host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); | 635 | host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); |
625 | } | 636 | } |
626 | 637 | ||
638 | /* must be called with host->lock held */ | ||
627 | static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, | 639 | static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, |
628 | struct mmc_request *mrq) | 640 | struct mmc_request *mrq) |
629 | { | 641 | { |
630 | dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", | 642 | dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", |
631 | host->state); | 643 | host->state); |
632 | 644 | ||
633 | spin_lock_bh(&host->lock); | ||
634 | slot->mrq = mrq; | 645 | slot->mrq = mrq; |
635 | 646 | ||
636 | if (host->state == STATE_IDLE) { | 647 | if (host->state == STATE_IDLE) { |
@@ -639,8 +650,6 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, | |||
639 | } else { | 650 | } else { |
640 | list_add_tail(&slot->queue_node, &host->queue); | 651 | list_add_tail(&slot->queue_node, &host->queue); |
641 | } | 652 | } |
642 | |||
643 | spin_unlock_bh(&host->lock); | ||
644 | } | 653 | } |
645 | 654 | ||
646 | static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) | 655 | static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
@@ -650,14 +659,23 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
650 | 659 | ||
651 | WARN_ON(slot->mrq); | 660 | WARN_ON(slot->mrq); |
652 | 661 | ||
662 | /* | ||
663 | * The check for card presence and queueing of the request must be | ||
664 | * atomic, otherwise the card could be removed in between and the | ||
665 | * request wouldn't fail until another card was inserted. | ||
666 | */ | ||
667 | spin_lock_bh(&host->lock); | ||
668 | |||
653 | if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { | 669 | if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { |
670 | spin_unlock_bh(&host->lock); | ||
654 | mrq->cmd->error = -ENOMEDIUM; | 671 | mrq->cmd->error = -ENOMEDIUM; |
655 | mmc_request_done(mmc, mrq); | 672 | mmc_request_done(mmc, mrq); |
656 | return; | 673 | return; |
657 | } | 674 | } |
658 | 675 | ||
659 | /* We don't support multiple blocks of weird lengths. */ | ||
660 | dw_mci_queue_request(host, slot, mrq); | 676 | dw_mci_queue_request(host, slot, mrq); |
677 | |||
678 | spin_unlock_bh(&host->lock); | ||
661 | } | 679 | } |
662 | 680 | ||
663 | static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 681 | static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
@@ -831,7 +849,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
831 | struct mmc_command *cmd; | 849 | struct mmc_command *cmd; |
832 | enum dw_mci_state state; | 850 | enum dw_mci_state state; |
833 | enum dw_mci_state prev_state; | 851 | enum dw_mci_state prev_state; |
834 | u32 status; | 852 | u32 status, ctrl; |
835 | 853 | ||
836 | spin_lock(&host->lock); | 854 | spin_lock(&host->lock); |
837 | 855 | ||
@@ -891,13 +909,19 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
891 | 909 | ||
892 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | 910 | if (status & DW_MCI_DATA_ERROR_FLAGS) { |
893 | if (status & SDMMC_INT_DTO) { | 911 | if (status & SDMMC_INT_DTO) { |
894 | dev_err(&host->pdev->dev, | ||
895 | "data timeout error\n"); | ||
896 | data->error = -ETIMEDOUT; | 912 | data->error = -ETIMEDOUT; |
897 | } else if (status & SDMMC_INT_DCRC) { | 913 | } else if (status & SDMMC_INT_DCRC) { |
898 | dev_err(&host->pdev->dev, | ||
899 | "data CRC error\n"); | ||
900 | data->error = -EILSEQ; | 914 | data->error = -EILSEQ; |
915 | } else if (status & SDMMC_INT_EBE && | ||
916 | host->dir_status == | ||
917 | DW_MCI_SEND_STATUS) { | ||
918 | /* | ||
919 | * No data CRC status was returned. | ||
920 | * The number of bytes transferred will | ||
921 | * be exaggerated in PIO mode. | ||
922 | */ | ||
923 | data->bytes_xfered = 0; | ||
924 | data->error = -ETIMEDOUT; | ||
901 | } else { | 925 | } else { |
902 | dev_err(&host->pdev->dev, | 926 | dev_err(&host->pdev->dev, |
903 | "data FIFO error " | 927 | "data FIFO error " |
@@ -905,6 +929,16 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
905 | status); | 929 | status); |
906 | data->error = -EIO; | 930 | data->error = -EIO; |
907 | } | 931 | } |
932 | /* | ||
933 | * After an error, there may be data lingering | ||
934 | * in the FIFO, so reset it - doing so | ||
935 | * generates a block interrupt, hence setting | ||
936 | * the scatter-gather pointer to NULL. | ||
937 | */ | ||
938 | host->sg = NULL; | ||
939 | ctrl = mci_readl(host, CTRL); | ||
940 | ctrl |= SDMMC_CTRL_FIFO_RESET; | ||
941 | mci_writel(host, CTRL, ctrl); | ||
908 | } else { | 942 | } else { |
909 | data->bytes_xfered = data->blocks * data->blksz; | 943 | data->bytes_xfered = data->blocks * data->blksz; |
910 | data->error = 0; | 944 | data->error = 0; |
@@ -946,84 +980,278 @@ unlock: | |||
946 | 980 | ||
947 | } | 981 | } |
948 | 982 | ||
949 | static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) | 983 | /* push final bytes to part_buf, only use during push */ |
984 | static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) | ||
950 | { | 985 | { |
951 | u16 *pdata = (u16 *)buf; | 986 | memcpy((void *)&host->part_buf, buf, cnt); |
987 | host->part_buf_count = cnt; | ||
988 | } | ||
952 | 989 | ||
953 | WARN_ON(cnt % 2 != 0); | 990 | /* append bytes to part_buf, only use during push */ |
991 | static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) | ||
992 | { | ||
993 | cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); | ||
994 | memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); | ||
995 | host->part_buf_count += cnt; | ||
996 | return cnt; | ||
997 | } | ||
954 | 998 | ||
955 | cnt = cnt >> 1; | 999 | /* pull first bytes from part_buf, only use during pull */ |
956 | while (cnt > 0) { | 1000 | static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) |
957 | mci_writew(host, DATA, *pdata++); | 1001 | { |
958 | cnt--; | 1002 | cnt = min(cnt, (int)host->part_buf_count); |
1003 | if (cnt) { | ||
1004 | memcpy(buf, (void *)&host->part_buf + host->part_buf_start, | ||
1005 | cnt); | ||
1006 | host->part_buf_count -= cnt; | ||
1007 | host->part_buf_start += cnt; | ||
959 | } | 1008 | } |
1009 | return cnt; | ||
960 | } | 1010 | } |
961 | 1011 | ||
962 | static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) | 1012 | /* pull final bytes from the part_buf, assuming it's just been filled */ |
1013 | static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) | ||
963 | { | 1014 | { |
964 | u16 *pdata = (u16 *)buf; | 1015 | memcpy(buf, &host->part_buf, cnt); |
1016 | host->part_buf_start = cnt; | ||
1017 | host->part_buf_count = (1 << host->data_shift) - cnt; | ||
1018 | } | ||
965 | 1019 | ||
966 | WARN_ON(cnt % 2 != 0); | 1020 | static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) |
1021 | { | ||
1022 | /* try and push anything in the part_buf */ | ||
1023 | if (unlikely(host->part_buf_count)) { | ||
1024 | int len = dw_mci_push_part_bytes(host, buf, cnt); | ||
1025 | buf += len; | ||
1026 | cnt -= len; | ||
1027 | if (!sg_next(host->sg) || host->part_buf_count == 2) { | ||
1028 | mci_writew(host, DATA, host->part_buf16); | ||
1029 | host->part_buf_count = 0; | ||
1030 | } | ||
1031 | } | ||
1032 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
1033 | if (unlikely((unsigned long)buf & 0x1)) { | ||
1034 | while (cnt >= 2) { | ||
1035 | u16 aligned_buf[64]; | ||
1036 | int len = min(cnt & -2, (int)sizeof(aligned_buf)); | ||
1037 | int items = len >> 1; | ||
1038 | int i; | ||
1039 | /* memcpy from input buffer into aligned buffer */ | ||
1040 | memcpy(aligned_buf, buf, len); | ||
1041 | buf += len; | ||
1042 | cnt -= len; | ||
1043 | /* push data from aligned buffer into fifo */ | ||
1044 | for (i = 0; i < items; ++i) | ||
1045 | mci_writew(host, DATA, aligned_buf[i]); | ||
1046 | } | ||
1047 | } else | ||
1048 | #endif | ||
1049 | { | ||
1050 | u16 *pdata = buf; | ||
1051 | for (; cnt >= 2; cnt -= 2) | ||
1052 | mci_writew(host, DATA, *pdata++); | ||
1053 | buf = pdata; | ||
1054 | } | ||
1055 | /* put anything remaining in the part_buf */ | ||
1056 | if (cnt) { | ||
1057 | dw_mci_set_part_bytes(host, buf, cnt); | ||
1058 | if (!sg_next(host->sg)) | ||
1059 | mci_writew(host, DATA, host->part_buf16); | ||
1060 | } | ||
1061 | } | ||
967 | 1062 | ||
968 | cnt = cnt >> 1; | 1063 | static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) |
969 | while (cnt > 0) { | 1064 | { |
970 | *pdata++ = mci_readw(host, DATA); | 1065 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
971 | cnt--; | 1066 | if (unlikely((unsigned long)buf & 0x1)) { |
1067 | while (cnt >= 2) { | ||
1068 | /* pull data from fifo into aligned buffer */ | ||
1069 | u16 aligned_buf[64]; | ||
1070 | int len = min(cnt & -2, (int)sizeof(aligned_buf)); | ||
1071 | int items = len >> 1; | ||
1072 | int i; | ||
1073 | for (i = 0; i < items; ++i) | ||
1074 | aligned_buf[i] = mci_readw(host, DATA); | ||
1075 | /* memcpy from aligned buffer into output buffer */ | ||
1076 | memcpy(buf, aligned_buf, len); | ||
1077 | buf += len; | ||
1078 | cnt -= len; | ||
1079 | } | ||
1080 | } else | ||
1081 | #endif | ||
1082 | { | ||
1083 | u16 *pdata = buf; | ||
1084 | for (; cnt >= 2; cnt -= 2) | ||
1085 | *pdata++ = mci_readw(host, DATA); | ||
1086 | buf = pdata; | ||
1087 | } | ||
1088 | if (cnt) { | ||
1089 | host->part_buf16 = mci_readw(host, DATA); | ||
1090 | dw_mci_pull_final_bytes(host, buf, cnt); | ||
972 | } | 1091 | } |
973 | } | 1092 | } |
974 | 1093 | ||
975 | static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) | 1094 | static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) |
976 | { | 1095 | { |
977 | u32 *pdata = (u32 *)buf; | 1096 | /* try and push anything in the part_buf */ |
978 | 1097 | if (unlikely(host->part_buf_count)) { | |
979 | WARN_ON(cnt % 4 != 0); | 1098 | int len = dw_mci_push_part_bytes(host, buf, cnt); |
980 | WARN_ON((unsigned long)pdata & 0x3); | 1099 | buf += len; |
981 | 1100 | cnt -= len; | |
982 | cnt = cnt >> 2; | 1101 | if (!sg_next(host->sg) || host->part_buf_count == 4) { |
983 | while (cnt > 0) { | 1102 | mci_writel(host, DATA, host->part_buf32); |
984 | mci_writel(host, DATA, *pdata++); | 1103 | host->part_buf_count = 0; |
985 | cnt--; | 1104 | } |
1105 | } | ||
1106 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
1107 | if (unlikely((unsigned long)buf & 0x3)) { | ||
1108 | while (cnt >= 4) { | ||
1109 | u32 aligned_buf[32]; | ||
1110 | int len = min(cnt & -4, (int)sizeof(aligned_buf)); | ||
1111 | int items = len >> 2; | ||
1112 | int i; | ||
1113 | /* memcpy from input buffer into aligned buffer */ | ||
1114 | memcpy(aligned_buf, buf, len); | ||
1115 | buf += len; | ||
1116 | cnt -= len; | ||
1117 | /* push data from aligned buffer into fifo */ | ||
1118 | for (i = 0; i < items; ++i) | ||
1119 | mci_writel(host, DATA, aligned_buf[i]); | ||
1120 | } | ||
1121 | } else | ||
1122 | #endif | ||
1123 | { | ||
1124 | u32 *pdata = buf; | ||
1125 | for (; cnt >= 4; cnt -= 4) | ||
1126 | mci_writel(host, DATA, *pdata++); | ||
1127 | buf = pdata; | ||
1128 | } | ||
1129 | /* put anything remaining in the part_buf */ | ||
1130 | if (cnt) { | ||
1131 | dw_mci_set_part_bytes(host, buf, cnt); | ||
1132 | if (!sg_next(host->sg)) | ||
1133 | mci_writel(host, DATA, host->part_buf32); | ||
986 | } | 1134 | } |
987 | } | 1135 | } |
988 | 1136 | ||
989 | static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) | 1137 | static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) |
990 | { | 1138 | { |
991 | u32 *pdata = (u32 *)buf; | 1139 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
992 | 1140 | if (unlikely((unsigned long)buf & 0x3)) { | |
993 | WARN_ON(cnt % 4 != 0); | 1141 | while (cnt >= 4) { |
994 | WARN_ON((unsigned long)pdata & 0x3); | 1142 | /* pull data from fifo into aligned buffer */ |
995 | 1143 | u32 aligned_buf[32]; | |
996 | cnt = cnt >> 2; | 1144 | int len = min(cnt & -4, (int)sizeof(aligned_buf)); |
997 | while (cnt > 0) { | 1145 | int items = len >> 2; |
998 | *pdata++ = mci_readl(host, DATA); | 1146 | int i; |
999 | cnt--; | 1147 | for (i = 0; i < items; ++i) |
1148 | aligned_buf[i] = mci_readl(host, DATA); | ||
1149 | /* memcpy from aligned buffer into output buffer */ | ||
1150 | memcpy(buf, aligned_buf, len); | ||
1151 | buf += len; | ||
1152 | cnt -= len; | ||
1153 | } | ||
1154 | } else | ||
1155 | #endif | ||
1156 | { | ||
1157 | u32 *pdata = buf; | ||
1158 | for (; cnt >= 4; cnt -= 4) | ||
1159 | *pdata++ = mci_readl(host, DATA); | ||
1160 | buf = pdata; | ||
1161 | } | ||
1162 | if (cnt) { | ||
1163 | host->part_buf32 = mci_readl(host, DATA); | ||
1164 | dw_mci_pull_final_bytes(host, buf, cnt); | ||
1000 | } | 1165 | } |
1001 | } | 1166 | } |
1002 | 1167 | ||
1003 | static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) | 1168 | static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) |
1004 | { | 1169 | { |
1005 | u64 *pdata = (u64 *)buf; | 1170 | /* try and push anything in the part_buf */ |
1006 | 1171 | if (unlikely(host->part_buf_count)) { | |
1007 | WARN_ON(cnt % 8 != 0); | 1172 | int len = dw_mci_push_part_bytes(host, buf, cnt); |
1008 | 1173 | buf += len; | |
1009 | cnt = cnt >> 3; | 1174 | cnt -= len; |
1010 | while (cnt > 0) { | 1175 | if (!sg_next(host->sg) || host->part_buf_count == 8) { |
1011 | mci_writeq(host, DATA, *pdata++); | 1176 | mci_writeq(host, DATA, host->part_buf); |
1012 | cnt--; | 1177 | host->part_buf_count = 0; |
1178 | } | ||
1179 | } | ||
1180 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
1181 | if (unlikely((unsigned long)buf & 0x7)) { | ||
1182 | while (cnt >= 8) { | ||
1183 | u64 aligned_buf[16]; | ||
1184 | int len = min(cnt & -8, (int)sizeof(aligned_buf)); | ||
1185 | int items = len >> 3; | ||
1186 | int i; | ||
1187 | /* memcpy from input buffer into aligned buffer */ | ||
1188 | memcpy(aligned_buf, buf, len); | ||
1189 | buf += len; | ||
1190 | cnt -= len; | ||
1191 | /* push data from aligned buffer into fifo */ | ||
1192 | for (i = 0; i < items; ++i) | ||
1193 | mci_writeq(host, DATA, aligned_buf[i]); | ||
1194 | } | ||
1195 | } else | ||
1196 | #endif | ||
1197 | { | ||
1198 | u64 *pdata = buf; | ||
1199 | for (; cnt >= 8; cnt -= 8) | ||
1200 | mci_writeq(host, DATA, *pdata++); | ||
1201 | buf = pdata; | ||
1202 | } | ||
1203 | /* put anything remaining in the part_buf */ | ||
1204 | if (cnt) { | ||
1205 | dw_mci_set_part_bytes(host, buf, cnt); | ||
1206 | if (!sg_next(host->sg)) | ||
1207 | mci_writeq(host, DATA, host->part_buf); | ||
1013 | } | 1208 | } |
1014 | } | 1209 | } |
1015 | 1210 | ||
1016 | static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) | 1211 | static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) |
1017 | { | 1212 | { |
1018 | u64 *pdata = (u64 *)buf; | 1213 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
1214 | if (unlikely((unsigned long)buf & 0x7)) { | ||
1215 | while (cnt >= 8) { | ||
1216 | /* pull data from fifo into aligned buffer */ | ||
1217 | u64 aligned_buf[16]; | ||
1218 | int len = min(cnt & -8, (int)sizeof(aligned_buf)); | ||
1219 | int items = len >> 3; | ||
1220 | int i; | ||
1221 | for (i = 0; i < items; ++i) | ||
1222 | aligned_buf[i] = mci_readq(host, DATA); | ||
1223 | /* memcpy from aligned buffer into output buffer */ | ||
1224 | memcpy(buf, aligned_buf, len); | ||
1225 | buf += len; | ||
1226 | cnt -= len; | ||
1227 | } | ||
1228 | } else | ||
1229 | #endif | ||
1230 | { | ||
1231 | u64 *pdata = buf; | ||
1232 | for (; cnt >= 8; cnt -= 8) | ||
1233 | *pdata++ = mci_readq(host, DATA); | ||
1234 | buf = pdata; | ||
1235 | } | ||
1236 | if (cnt) { | ||
1237 | host->part_buf = mci_readq(host, DATA); | ||
1238 | dw_mci_pull_final_bytes(host, buf, cnt); | ||
1239 | } | ||
1240 | } | ||
1019 | 1241 | ||
1020 | WARN_ON(cnt % 8 != 0); | 1242 | static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) |
1243 | { | ||
1244 | int len; | ||
1021 | 1245 | ||
1022 | cnt = cnt >> 3; | 1246 | /* get remaining partial bytes */ |
1023 | while (cnt > 0) { | 1247 | len = dw_mci_pull_part_bytes(host, buf, cnt); |
1024 | *pdata++ = mci_readq(host, DATA); | 1248 | if (unlikely(len == cnt)) |
1025 | cnt--; | 1249 | return; |
1026 | } | 1250 | buf += len; |
1251 | cnt -= len; | ||
1252 | |||
1253 | /* get the rest of the data */ | ||
1254 | host->pull_data(host, buf, cnt); | ||
1027 | } | 1255 | } |
1028 | 1256 | ||
1029 | static void dw_mci_read_data_pio(struct dw_mci *host) | 1257 | static void dw_mci_read_data_pio(struct dw_mci *host) |
@@ -1037,9 +1265,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
1037 | unsigned int nbytes = 0, len; | 1265 | unsigned int nbytes = 0, len; |
1038 | 1266 | ||
1039 | do { | 1267 | do { |
1040 | len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift; | 1268 | len = host->part_buf_count + |
1269 | (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); | ||
1041 | if (offset + len <= sg->length) { | 1270 | if (offset + len <= sg->length) { |
1042 | host->pull_data(host, (void *)(buf + offset), len); | 1271 | dw_mci_pull_data(host, (void *)(buf + offset), len); |
1043 | 1272 | ||
1044 | offset += len; | 1273 | offset += len; |
1045 | nbytes += len; | 1274 | nbytes += len; |
@@ -1055,8 +1284,8 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
1055 | } | 1284 | } |
1056 | } else { | 1285 | } else { |
1057 | unsigned int remaining = sg->length - offset; | 1286 | unsigned int remaining = sg->length - offset; |
1058 | host->pull_data(host, (void *)(buf + offset), | 1287 | dw_mci_pull_data(host, (void *)(buf + offset), |
1059 | remaining); | 1288 | remaining); |
1060 | nbytes += remaining; | 1289 | nbytes += remaining; |
1061 | 1290 | ||
1062 | flush_dcache_page(sg_page(sg)); | 1291 | flush_dcache_page(sg_page(sg)); |
@@ -1066,7 +1295,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
1066 | 1295 | ||
1067 | offset = len - remaining; | 1296 | offset = len - remaining; |
1068 | buf = sg_virt(sg); | 1297 | buf = sg_virt(sg); |
1069 | host->pull_data(host, buf, offset); | 1298 | dw_mci_pull_data(host, buf, offset); |
1070 | nbytes += offset; | 1299 | nbytes += offset; |
1071 | } | 1300 | } |
1072 | 1301 | ||
@@ -1083,7 +1312,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
1083 | return; | 1312 | return; |
1084 | } | 1313 | } |
1085 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ | 1314 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ |
1086 | len = SDMMC_GET_FCNT(mci_readl(host, STATUS)); | ||
1087 | host->pio_offset = offset; | 1315 | host->pio_offset = offset; |
1088 | data->bytes_xfered += nbytes; | 1316 | data->bytes_xfered += nbytes; |
1089 | return; | 1317 | return; |
@@ -1105,8 +1333,9 @@ static void dw_mci_write_data_pio(struct dw_mci *host) | |||
1105 | unsigned int nbytes = 0, len; | 1333 | unsigned int nbytes = 0, len; |
1106 | 1334 | ||
1107 | do { | 1335 | do { |
1108 | len = SDMMC_FIFO_SZ - | 1336 | len = ((host->fifo_depth - |
1109 | (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); | 1337 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift) |
1338 | - host->part_buf_count; | ||
1110 | if (offset + len <= sg->length) { | 1339 | if (offset + len <= sg->length) { |
1111 | host->push_data(host, (void *)(buf + offset), len); | 1340 | host->push_data(host, (void *)(buf + offset), len); |
1112 | 1341 | ||
@@ -1151,10 +1380,8 @@ static void dw_mci_write_data_pio(struct dw_mci *host) | |||
1151 | return; | 1380 | return; |
1152 | } | 1381 | } |
1153 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ | 1382 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ |
1154 | |||
1155 | host->pio_offset = offset; | 1383 | host->pio_offset = offset; |
1156 | data->bytes_xfered += nbytes; | 1384 | data->bytes_xfered += nbytes; |
1157 | |||
1158 | return; | 1385 | return; |
1159 | 1386 | ||
1160 | done: | 1387 | done: |
@@ -1202,7 +1429,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1202 | host->cmd_status = status; | 1429 | host->cmd_status = status; |
1203 | smp_wmb(); | 1430 | smp_wmb(); |
1204 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); | 1431 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); |
1205 | tasklet_schedule(&host->tasklet); | ||
1206 | } | 1432 | } |
1207 | 1433 | ||
1208 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { | 1434 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { |
@@ -1211,7 +1437,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1211 | host->data_status = status; | 1437 | host->data_status = status; |
1212 | smp_wmb(); | 1438 | smp_wmb(); |
1213 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | 1439 | set_bit(EVENT_DATA_ERROR, &host->pending_events); |
1214 | tasklet_schedule(&host->tasklet); | 1440 | if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | |
1441 | SDMMC_INT_SBE | SDMMC_INT_EBE))) | ||
1442 | tasklet_schedule(&host->tasklet); | ||
1215 | } | 1443 | } |
1216 | 1444 | ||
1217 | if (pending & SDMMC_INT_DATA_OVER) { | 1445 | if (pending & SDMMC_INT_DATA_OVER) { |
@@ -1229,13 +1457,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1229 | 1457 | ||
1230 | if (pending & SDMMC_INT_RXDR) { | 1458 | if (pending & SDMMC_INT_RXDR) { |
1231 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); | 1459 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
1232 | if (host->sg) | 1460 | if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) |
1233 | dw_mci_read_data_pio(host); | 1461 | dw_mci_read_data_pio(host); |
1234 | } | 1462 | } |
1235 | 1463 | ||
1236 | if (pending & SDMMC_INT_TXDR) { | 1464 | if (pending & SDMMC_INT_TXDR) { |
1237 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); | 1465 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
1238 | if (host->sg) | 1466 | if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) |
1239 | dw_mci_write_data_pio(host); | 1467 | dw_mci_write_data_pio(host); |
1240 | } | 1468 | } |
1241 | 1469 | ||
@@ -1246,7 +1474,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1246 | 1474 | ||
1247 | if (pending & SDMMC_INT_CD) { | 1475 | if (pending & SDMMC_INT_CD) { |
1248 | mci_writel(host, RINTSTS, SDMMC_INT_CD); | 1476 | mci_writel(host, RINTSTS, SDMMC_INT_CD); |
1249 | tasklet_schedule(&host->card_tasklet); | 1477 | queue_work(dw_mci_card_workqueue, &host->card_work); |
1250 | } | 1478 | } |
1251 | 1479 | ||
1252 | } while (pass_count++ < 5); | 1480 | } while (pass_count++ < 5); |
@@ -1265,9 +1493,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1265 | return IRQ_HANDLED; | 1493 | return IRQ_HANDLED; |
1266 | } | 1494 | } |
1267 | 1495 | ||
1268 | static void dw_mci_tasklet_card(unsigned long data) | 1496 | static void dw_mci_work_routine_card(struct work_struct *work) |
1269 | { | 1497 | { |
1270 | struct dw_mci *host = (struct dw_mci *)data; | 1498 | struct dw_mci *host = container_of(work, struct dw_mci, card_work); |
1271 | int i; | 1499 | int i; |
1272 | 1500 | ||
1273 | for (i = 0; i < host->num_slots; i++) { | 1501 | for (i = 0; i < host->num_slots; i++) { |
@@ -1279,22 +1507,21 @@ static void dw_mci_tasklet_card(unsigned long data) | |||
1279 | 1507 | ||
1280 | present = dw_mci_get_cd(mmc); | 1508 | present = dw_mci_get_cd(mmc); |
1281 | while (present != slot->last_detect_state) { | 1509 | while (present != slot->last_detect_state) { |
1282 | spin_lock(&host->lock); | ||
1283 | |||
1284 | dev_dbg(&slot->mmc->class_dev, "card %s\n", | 1510 | dev_dbg(&slot->mmc->class_dev, "card %s\n", |
1285 | present ? "inserted" : "removed"); | 1511 | present ? "inserted" : "removed"); |
1286 | 1512 | ||
1513 | /* Power up slot (before spin_lock, may sleep) */ | ||
1514 | if (present != 0 && host->pdata->setpower) | ||
1515 | host->pdata->setpower(slot->id, mmc->ocr_avail); | ||
1516 | |||
1517 | spin_lock_bh(&host->lock); | ||
1518 | |||
1287 | /* Card change detected */ | 1519 | /* Card change detected */ |
1288 | slot->last_detect_state = present; | 1520 | slot->last_detect_state = present; |
1289 | 1521 | ||
1290 | /* Power up slot */ | 1522 | /* Mark card as present if applicable */ |
1291 | if (present != 0) { | 1523 | if (present != 0) |
1292 | if (host->pdata->setpower) | ||
1293 | host->pdata->setpower(slot->id, | ||
1294 | mmc->ocr_avail); | ||
1295 | |||
1296 | set_bit(DW_MMC_CARD_PRESENT, &slot->flags); | 1524 | set_bit(DW_MMC_CARD_PRESENT, &slot->flags); |
1297 | } | ||
1298 | 1525 | ||
1299 | /* Clean up queue if present */ | 1526 | /* Clean up queue if present */ |
1300 | mrq = slot->mrq; | 1527 | mrq = slot->mrq; |
@@ -1344,8 +1571,6 @@ static void dw_mci_tasklet_card(unsigned long data) | |||
1344 | 1571 | ||
1345 | /* Power down slot */ | 1572 | /* Power down slot */ |
1346 | if (present == 0) { | 1573 | if (present == 0) { |
1347 | if (host->pdata->setpower) | ||
1348 | host->pdata->setpower(slot->id, 0); | ||
1349 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); | 1574 | clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); |
1350 | 1575 | ||
1351 | /* | 1576 | /* |
@@ -1367,7 +1592,12 @@ static void dw_mci_tasklet_card(unsigned long data) | |||
1367 | 1592 | ||
1368 | } | 1593 | } |
1369 | 1594 | ||
1370 | spin_unlock(&host->lock); | 1595 | spin_unlock_bh(&host->lock); |
1596 | |||
1597 | /* Power down slot (after spin_unlock, may sleep) */ | ||
1598 | if (present == 0 && host->pdata->setpower) | ||
1599 | host->pdata->setpower(slot->id, 0); | ||
1600 | |||
1371 | present = dw_mci_get_cd(mmc); | 1601 | present = dw_mci_get_cd(mmc); |
1372 | } | 1602 | } |
1373 | 1603 | ||
@@ -1467,7 +1697,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) | |||
1467 | * Card may have been plugged in prior to boot so we | 1697 | * Card may have been plugged in prior to boot so we |
1468 | * need to run the detect tasklet | 1698 | * need to run the detect tasklet |
1469 | */ | 1699 | */ |
1470 | tasklet_schedule(&host->card_tasklet); | 1700 | queue_work(dw_mci_card_workqueue, &host->card_work); |
1471 | 1701 | ||
1472 | return 0; | 1702 | return 0; |
1473 | } | 1703 | } |
@@ -1645,8 +1875,19 @@ static int dw_mci_probe(struct platform_device *pdev) | |||
1645 | * FIFO threshold settings RxMark = fifo_size / 2 - 1, | 1875 | * FIFO threshold settings RxMark = fifo_size / 2 - 1, |
1646 | * Tx Mark = fifo_size / 2 DMA Size = 8 | 1876 | * Tx Mark = fifo_size / 2 DMA Size = 8 |
1647 | */ | 1877 | */ |
1648 | fifo_size = mci_readl(host, FIFOTH); | 1878 | if (!host->pdata->fifo_depth) { |
1649 | fifo_size = (fifo_size >> 16) & 0x7ff; | 1879 | /* |
1880 | * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may | ||
1881 | * have been overwritten by the bootloader, just like we're | ||
1882 | * about to do, so if you know the value for your hardware, you | ||
1883 | * should put it in the platform data. | ||
1884 | */ | ||
1885 | fifo_size = mci_readl(host, FIFOTH); | ||
1886 | fifo_size = 1 + ((fifo_size >> 16) & 0x7ff); | ||
1887 | } else { | ||
1888 | fifo_size = host->pdata->fifo_depth; | ||
1889 | } | ||
1890 | host->fifo_depth = fifo_size; | ||
1650 | host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | | 1891 | host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | |
1651 | ((fifo_size/2) << 0)); | 1892 | ((fifo_size/2) << 0)); |
1652 | mci_writel(host, FIFOTH, host->fifoth_val); | 1893 | mci_writel(host, FIFOTH, host->fifoth_val); |
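
Note on the FIFOTH setup above: the FIFO depth is taken from platform data when provided, and otherwise recovered from the reset value of RX_WMark (which powers up as FIFO_DEPTH - 1, hence the "+ 1"). For illustration, assuming a 32-word FIFO, the programmed value works out as:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int fifo_size = 32;  /* pdata->fifo_depth, or RX_WMark reset value + 1 */
        uint32_t fifoth = (0x2u << 28)                  /* DMA multiple-transaction size: 8 */
                        | ((fifo_size / 2 - 1) << 16)   /* RX watermark = 15 */
                        | ((fifo_size / 2) << 0);       /* TX watermark = 16 */

        printf("FIFOTH = %#x\n", fifoth);               /* prints 0x200f0010 */
        return 0;
    }
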
@@ -1656,12 +1897,15 @@ static int dw_mci_probe(struct platform_device *pdev) | |||
1656 | mci_writel(host, CLKSRC, 0); | 1897 | mci_writel(host, CLKSRC, 0); |
1657 | 1898 | ||
1658 | tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); | 1899 | tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); |
1659 | tasklet_init(&host->card_tasklet, | 1900 | dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", |
1660 | dw_mci_tasklet_card, (unsigned long)host); | 1901 | WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); |
1902 | if (!dw_mci_card_workqueue) | ||
1903 | goto err_dmaunmap; | ||
1904 | INIT_WORK(&host->card_work, dw_mci_work_routine_card); | ||
1661 | 1905 | ||
1662 | ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); | 1906 | ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); |
1663 | if (ret) | 1907 | if (ret) |
1664 | goto err_dmaunmap; | 1908 | goto err_workqueue; |
1665 | 1909 | ||
1666 | platform_set_drvdata(pdev, host); | 1910 | platform_set_drvdata(pdev, host); |
1667 | 1911 | ||
@@ -1690,7 +1934,9 @@ static int dw_mci_probe(struct platform_device *pdev) | |||
1690 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ | 1934 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ |
1691 | 1935 | ||
1692 | dev_info(&pdev->dev, "DW MMC controller at irq %d, " | 1936 | dev_info(&pdev->dev, "DW MMC controller at irq %d, " |
1693 | "%d bit host data width\n", irq, width); | 1937 | "%d bit host data width, " |
1938 | "%u deep fifo\n", | ||
1939 | irq, width, fifo_size); | ||
1694 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) | 1940 | if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) |
1695 | dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); | 1941 | dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); |
1696 | 1942 | ||
@@ -1705,6 +1951,9 @@ err_init_slot: | |||
1705 | } | 1951 | } |
1706 | free_irq(irq, host); | 1952 | free_irq(irq, host); |
1707 | 1953 | ||
1954 | err_workqueue: | ||
1955 | destroy_workqueue(dw_mci_card_workqueue); | ||
1956 | |||
1708 | err_dmaunmap: | 1957 | err_dmaunmap: |
1709 | if (host->use_dma && host->dma_ops->exit) | 1958 | if (host->use_dma && host->dma_ops->exit) |
1710 | host->dma_ops->exit(host); | 1959 | host->dma_ops->exit(host); |
@@ -1744,6 +1993,7 @@ static int __exit dw_mci_remove(struct platform_device *pdev) | |||
1744 | mci_writel(host, CLKSRC, 0); | 1993 | mci_writel(host, CLKSRC, 0); |
1745 | 1994 | ||
1746 | free_irq(platform_get_irq(pdev, 0), host); | 1995 | free_irq(platform_get_irq(pdev, 0), host); |
1996 | destroy_workqueue(dw_mci_card_workqueue); | ||
1747 | dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); | 1997 | dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); |
1748 | 1998 | ||
1749 | if (host->use_dma && host->dma_ops->exit) | 1999 | if (host->use_dma && host->dma_ops->exit) |
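
Note on the card-detect change above: detection moves from a tasklet to a workqueue because the handler now calls the board's setpower() hook, which may sleep; for the same reason the power-up call is made before host->lock is taken and the power-down call after it is dropped. A minimal sketch of that interrupt-to-workqueue split (names are illustrative, not the driver's):

    #include <linux/workqueue.h>
    #include <linux/interrupt.h>
    #include <linux/errno.h>

    static struct workqueue_struct *card_wq;
    static struct work_struct card_work;

    static void card_work_fn(struct work_struct *work)
    {
        /* process context: may sleep, e.g. regulator/GPIO power switching */
    }

    static irqreturn_t card_irq(int irq, void *dev_id)
    {
        queue_work(card_wq, &card_work);   /* never sleeps, safe in IRQ context */
        return IRQ_HANDLED;
    }

    static int card_detect_init(void)
    {
        card_wq = alloc_workqueue("card-detect", WQ_MEM_RECLAIM, 1);
        if (!card_wq)
            return -ENOMEM;
        INIT_WORK(&card_work, card_work_fn);
        return 0;
    }
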
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 23c662af5616..027d37735394 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -118,7 +118,6 @@ | |||
118 | #define SDMMC_CMD_INDX(n) ((n) & 0x1F) | 118 | #define SDMMC_CMD_INDX(n) ((n) & 0x1F) |
119 | /* Status register defines */ | 119 | /* Status register defines */ |
120 | #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) | 120 | #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) |
121 | #define SDMMC_FIFO_SZ 32 | ||
122 | /* Internal DMAC interrupt defines */ | 121 | /* Internal DMAC interrupt defines */ |
123 | #define SDMMC_IDMAC_INT_AI BIT(9) | 122 | #define SDMMC_IDMAC_INT_AI BIT(9) |
124 | #define SDMMC_IDMAC_INT_NI BIT(8) | 123 | #define SDMMC_IDMAC_INT_NI BIT(8) |
@@ -134,22 +133,22 @@ | |||
134 | 133 | ||
135 | /* Register access macros */ | 134 | /* Register access macros */ |
136 | #define mci_readl(dev, reg) \ | 135 | #define mci_readl(dev, reg) \ |
137 | __raw_readl(dev->regs + SDMMC_##reg) | 136 | __raw_readl((dev)->regs + SDMMC_##reg) |
138 | #define mci_writel(dev, reg, value) \ | 137 | #define mci_writel(dev, reg, value) \ |
139 | __raw_writel((value), dev->regs + SDMMC_##reg) | 138 | __raw_writel((value), (dev)->regs + SDMMC_##reg) |
140 | 139 | ||
141 | /* 16-bit FIFO access macros */ | 140 | /* 16-bit FIFO access macros */ |
142 | #define mci_readw(dev, reg) \ | 141 | #define mci_readw(dev, reg) \ |
143 | __raw_readw(dev->regs + SDMMC_##reg) | 142 | __raw_readw((dev)->regs + SDMMC_##reg) |
144 | #define mci_writew(dev, reg, value) \ | 143 | #define mci_writew(dev, reg, value) \ |
145 | __raw_writew((value), dev->regs + SDMMC_##reg) | 144 | __raw_writew((value), (dev)->regs + SDMMC_##reg) |
146 | 145 | ||
147 | /* 64-bit FIFO access macros */ | 146 | /* 64-bit FIFO access macros */ |
148 | #ifdef readq | 147 | #ifdef readq |
149 | #define mci_readq(dev, reg) \ | 148 | #define mci_readq(dev, reg) \ |
150 | __raw_readq(dev->regs + SDMMC_##reg) | 149 | __raw_readq((dev)->regs + SDMMC_##reg) |
151 | #define mci_writeq(dev, reg, value) \ | 150 | #define mci_writeq(dev, reg, value) \ |
152 | __raw_writeq((value), dev->regs + SDMMC_##reg) | 151 | __raw_writeq((value), (dev)->regs + SDMMC_##reg) |
153 | #else | 152 | #else |
154 | /* | 153 | /* |
155 | * Dummy readq implementation for architectures that don't define it. | 154 | * Dummy readq implementation for architectures that don't define it. |
@@ -160,9 +159,9 @@ | |||
160 | * rest of the code free from ifdefs. | 159 | * rest of the code free from ifdefs. |
161 | */ | 160 | */ |
162 | #define mci_readq(dev, reg) \ | 161 | #define mci_readq(dev, reg) \ |
163 | (*(volatile u64 __force *)(dev->regs + SDMMC_##reg)) | 162 | (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg)) |
164 | #define mci_writeq(dev, reg, value) \ | 163 | #define mci_writeq(dev, reg, value) \ |
165 | (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value) | 164 | (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) |
166 | #endif | 165 | #endif |
167 | 166 | ||
168 | #endif /* _DW_MMC_H_ */ | 167 | #endif /* _DW_MMC_H_ */ |
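
Note on the dw_mmc.h change above: it is purely defensive. Wrapping the dev macro argument in parentheses keeps the expansion correct when a caller passes anything more complex than a plain identifier. A hypothetical illustration:

    /* old form: the macro argument is not parenthesised */
    #define MCI_READL_OLD(dev, reg)  __raw_readl(dev->regs + SDMMC_##reg)
    /* new form: the macro argument is parenthesised */
    #define MCI_READL_NEW(dev, reg)  __raw_readl((dev)->regs + SDMMC_##reg)

    /*
     * MCI_READL_OLD(cond ? hosta : hostb, STATUS) expands to
     *   __raw_readl(cond ? hosta : hostb->regs + SDMMC_STATUS)    -- '->' binds to hostb only
     * MCI_READL_NEW(cond ? hosta : hostb, STATUS) expands to
     *   __raw_readl((cond ? hosta : hostb)->regs + SDMMC_STATUS)  -- intended
     */
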
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index fe140724a02e..fef7140eb1d0 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host) | |||
226 | return; | 226 | return; |
227 | } | 227 | } |
228 | 228 | ||
229 | /* initialize pre request cookie */ | ||
230 | host->next_data.cookie = 1; | ||
231 | |||
229 | /* Try to acquire a generic DMA engine slave channel */ | 232 | /* Try to acquire a generic DMA engine slave channel */ |
230 | dma_cap_zero(mask); | 233 | dma_cap_zero(mask); |
231 | dma_cap_set(DMA_SLAVE, mask); | 234 | dma_cap_set(DMA_SLAVE, mask); |
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) | |||
335 | dir = DMA_FROM_DEVICE; | 338 | dir = DMA_FROM_DEVICE; |
336 | } | 339 | } |
337 | 340 | ||
338 | dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); | 341 | if (!data->host_cookie) |
342 | dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); | ||
339 | 343 | ||
340 | /* | 344 | /* |
341 | * Use of DMA with scatter-gather is impossible. | 345 | * Use of DMA with scatter-gather is impossible. |
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host) | |||
353 | dmaengine_terminate_all(host->dma_current); | 357 | dmaengine_terminate_all(host->dma_current); |
354 | } | 358 | } |
355 | 359 | ||
356 | static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | 360 | static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, |
361 | struct mmci_host_next *next) | ||
357 | { | 362 | { |
358 | struct variant_data *variant = host->variant; | 363 | struct variant_data *variant = host->variant; |
359 | struct dma_slave_config conf = { | 364 | struct dma_slave_config conf = { |
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | |||
364 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 369 | .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
365 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ | 370 | .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ |
366 | }; | 371 | }; |
367 | struct mmc_data *data = host->data; | ||
368 | struct dma_chan *chan; | 372 | struct dma_chan *chan; |
369 | struct dma_device *device; | 373 | struct dma_device *device; |
370 | struct dma_async_tx_descriptor *desc; | 374 | struct dma_async_tx_descriptor *desc; |
371 | int nr_sg; | 375 | int nr_sg; |
372 | 376 | ||
373 | host->dma_current = NULL; | 377 | /* Check if next job is already prepared */ |
378 | if (data->host_cookie && !next && | ||
379 | host->dma_current && host->dma_desc_current) | ||
380 | return 0; | ||
381 | |||
382 | if (!next) { | ||
383 | host->dma_current = NULL; | ||
384 | host->dma_desc_current = NULL; | ||
385 | } | ||
374 | 386 | ||
375 | if (data->flags & MMC_DATA_READ) { | 387 | if (data->flags & MMC_DATA_READ) { |
376 | conf.direction = DMA_FROM_DEVICE; | 388 | conf.direction = DMA_FROM_DEVICE; |
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | |||
385 | return -EINVAL; | 397 | return -EINVAL; |
386 | 398 | ||
387 | /* If less than or equal to the fifo size, don't bother with DMA */ | 399 | /* If less than or equal to the fifo size, don't bother with DMA */ |
388 | if (host->size <= variant->fifosize) | 400 | if (data->blksz * data->blocks <= variant->fifosize) |
389 | return -EINVAL; | 401 | return -EINVAL; |
390 | 402 | ||
391 | device = chan->device; | 403 | device = chan->device; |
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | |||
399 | if (!desc) | 411 | if (!desc) |
400 | goto unmap_exit; | 412 | goto unmap_exit; |
401 | 413 | ||
402 | /* Okay, go for it. */ | 414 | if (next) { |
403 | host->dma_current = chan; | 415 | next->dma_chan = chan; |
416 | next->dma_desc = desc; | ||
417 | } else { | ||
418 | host->dma_current = chan; | ||
419 | host->dma_desc_current = desc; | ||
420 | } | ||
421 | |||
422 | return 0; | ||
404 | 423 | ||
424 | unmap_exit: | ||
425 | if (!next) | ||
426 | dmaengine_terminate_all(chan); | ||
427 | dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); | ||
428 | return -ENOMEM; | ||
429 | } | ||
430 | |||
431 | static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | ||
432 | { | ||
433 | int ret; | ||
434 | struct mmc_data *data = host->data; | ||
435 | |||
436 | ret = mmci_dma_prep_data(host, host->data, NULL); | ||
437 | if (ret) | ||
438 | return ret; | ||
439 | |||
440 | /* Okay, go for it. */ | ||
405 | dev_vdbg(mmc_dev(host->mmc), | 441 | dev_vdbg(mmc_dev(host->mmc), |
406 | "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", | 442 | "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", |
407 | data->sg_len, data->blksz, data->blocks, data->flags); | 443 | data->sg_len, data->blksz, data->blocks, data->flags); |
408 | dmaengine_submit(desc); | 444 | dmaengine_submit(host->dma_desc_current); |
409 | dma_async_issue_pending(chan); | 445 | dma_async_issue_pending(host->dma_current); |
410 | 446 | ||
411 | datactrl |= MCI_DPSM_DMAENABLE; | 447 | datactrl |= MCI_DPSM_DMAENABLE; |
412 | 448 | ||
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) | |||
421 | writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, | 457 | writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, |
422 | host->base + MMCIMASK0); | 458 | host->base + MMCIMASK0); |
423 | return 0; | 459 | return 0; |
460 | } | ||
424 | 461 | ||
425 | unmap_exit: | 462 | static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) |
426 | dmaengine_terminate_all(chan); | 463 | { |
427 | dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); | 464 | struct mmci_host_next *next = &host->next_data; |
428 | return -ENOMEM; | 465 | |
466 | if (data->host_cookie && data->host_cookie != next->cookie) { | ||
467 | printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" | ||
468 | " host->next_data.cookie %d\n", | ||
469 | __func__, data->host_cookie, host->next_data.cookie); | ||
470 | data->host_cookie = 0; | ||
471 | } | ||
472 | |||
473 | if (!data->host_cookie) | ||
474 | return; | ||
475 | |||
476 | host->dma_desc_current = next->dma_desc; | ||
477 | host->dma_current = next->dma_chan; | ||
478 | |||
479 | next->dma_desc = NULL; | ||
480 | next->dma_chan = NULL; | ||
429 | } | 481 | } |
482 | |||
483 | static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, | ||
484 | bool is_first_req) | ||
485 | { | ||
486 | struct mmci_host *host = mmc_priv(mmc); | ||
487 | struct mmc_data *data = mrq->data; | ||
488 | struct mmci_host_next *nd = &host->next_data; | ||
489 | |||
490 | if (!data) | ||
491 | return; | ||
492 | |||
493 | if (data->host_cookie) { | ||
494 | data->host_cookie = 0; | ||
495 | return; | ||
496 | } | ||
497 | |||
498 | /* if configured for DMA */ | ||
499 | if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || | ||
500 | ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { | ||
501 | if (mmci_dma_prep_data(host, data, nd)) | ||
502 | data->host_cookie = 0; | ||
503 | else | ||
504 | data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, | ||
509 | int err) | ||
510 | { | ||
511 | struct mmci_host *host = mmc_priv(mmc); | ||
512 | struct mmc_data *data = mrq->data; | ||
513 | struct dma_chan *chan; | ||
514 | enum dma_data_direction dir; | ||
515 | |||
516 | if (!data) | ||
517 | return; | ||
518 | |||
519 | if (data->flags & MMC_DATA_READ) { | ||
520 | dir = DMA_FROM_DEVICE; | ||
521 | chan = host->dma_rx_channel; | ||
522 | } else { | ||
523 | dir = DMA_TO_DEVICE; | ||
524 | chan = host->dma_tx_channel; | ||
525 | } | ||
526 | |||
527 | |||
528 | /* if config for dma */ | ||
529 | if (chan) { | ||
530 | if (err) | ||
531 | dmaengine_terminate_all(chan); | ||
532 | if (err || data->host_cookie) | ||
533 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | ||
534 | data->sg_len, dir); | ||
535 | mrq->data->host_cookie = 0; | ||
536 | } | ||
537 | } | ||
538 | |||
430 | #else | 539 | #else |
431 | /* Blank functions if the DMA engine is not available */ | 540 | /* Blank functions if the DMA engine is not available */ |
541 | static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) | ||
542 | { | ||
543 | } | ||
432 | static inline void mmci_dma_setup(struct mmci_host *host) | 544 | static inline void mmci_dma_setup(struct mmci_host *host) |
433 | { | 545 | { |
434 | } | 546 | } |
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac | |||
449 | { | 561 | { |
450 | return -ENOSYS; | 562 | return -ENOSYS; |
451 | } | 563 | } |
564 | |||
565 | #define mmci_pre_request NULL | ||
566 | #define mmci_post_request NULL | ||
567 | |||
452 | #endif | 568 | #endif |
453 | 569 | ||
454 | static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) | 570 | static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) |
@@ -872,6 +988,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
872 | 988 | ||
873 | host->mrq = mrq; | 989 | host->mrq = mrq; |
874 | 990 | ||
991 | if (mrq->data) | ||
992 | mmci_get_next_data(host, mrq->data); | ||
993 | |||
875 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) | 994 | if (mrq->data && mrq->data->flags & MMC_DATA_READ) |
876 | mmci_start_data(host, mrq->data); | 995 | mmci_start_data(host, mrq->data); |
877 | 996 | ||
@@ -986,6 +1105,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id) | |||
986 | 1105 | ||
987 | static const struct mmc_host_ops mmci_ops = { | 1106 | static const struct mmc_host_ops mmci_ops = { |
988 | .request = mmci_request, | 1107 | .request = mmci_request, |
1108 | .pre_req = mmci_pre_request, | ||
1109 | .post_req = mmci_post_request, | ||
989 | .set_ios = mmci_set_ios, | 1110 | .set_ios = mmci_set_ios, |
990 | .get_ro = mmci_get_ro, | 1111 | .get_ro = mmci_get_ro, |
991 | .get_cd = mmci_get_cd, | 1112 | .get_cd = mmci_get_cd, |
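
Note on the mmci change above: with .pre_req and .post_req wired up, the core can map the scatterlist and build the DMA descriptor for the next request while the current one is still transferring. host_cookie marks a request whose data is already prepared (the cookie comparison in mmci_get_next_data() guards against a stale preparation), and post_req unmaps it afterwards. Roughly, from the core's point of view (call order only, not real code):

    /*
     * Request N is on the bus while request N+1 is being prepared.
     */
    host->ops->pre_req(mmc, mrq_next, false);   /* dma_map_sg() + descriptor,
                                                   sets mrq_next->data->host_cookie */
    /* ... request N finishes ... */
    host->ops->request(mmc, mrq_next);          /* picks up the prepared descriptor */
    host->ops->post_req(mmc, mrq_prev, err);    /* dma_unmap_sg(), clears host_cookie */
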
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 2164e8c6476c..79e4143ab9df 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h | |||
@@ -166,6 +166,12 @@ struct clk; | |||
166 | struct variant_data; | 166 | struct variant_data; |
167 | struct dma_chan; | 167 | struct dma_chan; |
168 | 168 | ||
169 | struct mmci_host_next { | ||
170 | struct dma_async_tx_descriptor *dma_desc; | ||
171 | struct dma_chan *dma_chan; | ||
172 | s32 cookie; | ||
173 | }; | ||
174 | |||
169 | struct mmci_host { | 175 | struct mmci_host { |
170 | phys_addr_t phybase; | 176 | phys_addr_t phybase; |
171 | void __iomem *base; | 177 | void __iomem *base; |
@@ -203,6 +209,8 @@ struct mmci_host { | |||
203 | struct dma_chan *dma_current; | 209 | struct dma_chan *dma_current; |
204 | struct dma_chan *dma_rx_channel; | 210 | struct dma_chan *dma_rx_channel; |
205 | struct dma_chan *dma_tx_channel; | 211 | struct dma_chan *dma_tx_channel; |
212 | struct dma_async_tx_descriptor *dma_desc_current; | ||
213 | struct mmci_host_next next_data; | ||
206 | 214 | ||
207 | #define dma_inprogress(host) ((host)->dma_current) | 215 | #define dma_inprogress(host) ((host)->dma_current) |
208 | #else | 216 | #else |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 99d39a6a1032..d513d47364d0 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -564,40 +564,38 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
564 | 564 | ||
565 | static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) | 565 | static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) |
566 | { | 566 | { |
567 | unsigned int ssp_rate, bit_rate; | 567 | unsigned int ssp_clk, ssp_sck; |
568 | u32 div1, div2; | 568 | u32 clock_divide, clock_rate; |
569 | u32 val; | 569 | u32 val; |
570 | 570 | ||
571 | ssp_rate = clk_get_rate(host->clk); | 571 | ssp_clk = clk_get_rate(host->clk); |
572 | 572 | ||
573 | for (div1 = 2; div1 < 254; div1 += 2) { | 573 | for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) { |
574 | div2 = ssp_rate / rate / div1; | 574 | clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide); |
575 | if (div2 < 0x100) | 575 | clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0; |
576 | if (clock_rate <= 255) | ||
576 | break; | 577 | break; |
577 | } | 578 | } |
578 | 579 | ||
579 | if (div1 >= 254) { | 580 | if (clock_divide > 254) { |
580 | dev_err(mmc_dev(host->mmc), | 581 | dev_err(mmc_dev(host->mmc), |
581 | "%s: cannot set clock to %d\n", __func__, rate); | 582 | "%s: cannot set clock to %d\n", __func__, rate); |
582 | return; | 583 | return; |
583 | } | 584 | } |
584 | 585 | ||
585 | if (div2 == 0) | 586 | ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); |
586 | bit_rate = ssp_rate / div1; | ||
587 | else | ||
588 | bit_rate = ssp_rate / div1 / div2; | ||
589 | 587 | ||
590 | val = readl(host->base + HW_SSP_TIMING); | 588 | val = readl(host->base + HW_SSP_TIMING); |
591 | val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); | 589 | val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); |
592 | val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE); | 590 | val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); |
593 | val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE); | 591 | val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); |
594 | writel(val, host->base + HW_SSP_TIMING); | 592 | writel(val, host->base + HW_SSP_TIMING); |
595 | 593 | ||
596 | host->clk_rate = bit_rate; | 594 | host->clk_rate = ssp_sck; |
597 | 595 | ||
598 | dev_dbg(mmc_dev(host->mmc), | 596 | dev_dbg(mmc_dev(host->mmc), |
599 | "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n", | 597 | "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n", |
600 | __func__, div1, div2, ssp_rate, bit_rate, rate); | 598 | __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate); |
601 | } | 599 | } |
602 | 600 | ||
603 | static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 601 | static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
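
Note on the mxs-mmc change above: the rewritten search picks the smallest even CLOCK_DIVIDE (2..254) for which CLOCK_RATE fits in 8 bits, with SSP_SCK = SSP_CLK / CLOCK_DIVIDE / (1 + CLOCK_RATE); rounding the intermediate division up keeps the actual clock at or below the requested rate. A worked example with an assumed 96 MHz SSP clock and a 400 kHz request:

    #include <stdio.h>
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ssp_clk = 96000000, rate = 400000;
        unsigned int clock_divide, clock_rate = 0, ssp_sck;

        for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
            clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
            clock_rate = clock_rate > 0 ? clock_rate - 1 : 0;
            if (clock_rate <= 255)
                break;
        }
        ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
        /* clock_divide = 2, clock_rate = 119, ssp_sck = 400000 */
        printf("%u %u %u\n", clock_divide, clock_rate, ssp_sck);
        return 0;
    }
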
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index dedf3dab8a3b..21e4a799df48 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | ||
20 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
21 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
@@ -33,6 +34,7 @@ | |||
33 | #include <linux/semaphore.h> | 34 | #include <linux/semaphore.h> |
34 | #include <linux/gpio.h> | 35 | #include <linux/gpio.h> |
35 | #include <linux/regulator/consumer.h> | 36 | #include <linux/regulator/consumer.h> |
37 | #include <linux/pm_runtime.h> | ||
36 | #include <plat/dma.h> | 38 | #include <plat/dma.h> |
37 | #include <mach/hardware.h> | 39 | #include <mach/hardware.h> |
38 | #include <plat/board.h> | 40 | #include <plat/board.h> |
@@ -116,15 +118,13 @@ | |||
116 | #define OMAP_MMC4_DEVID 3 | 118 | #define OMAP_MMC4_DEVID 3 |
117 | #define OMAP_MMC5_DEVID 4 | 119 | #define OMAP_MMC5_DEVID 4 |
118 | 120 | ||
121 | #define MMC_AUTOSUSPEND_DELAY 100 | ||
119 | #define MMC_TIMEOUT_MS 20 | 122 | #define MMC_TIMEOUT_MS 20 |
120 | #define OMAP_MMC_MASTER_CLOCK 96000000 | 123 | #define OMAP_MMC_MASTER_CLOCK 96000000 |
124 | #define OMAP_MMC_MIN_CLOCK 400000 | ||
125 | #define OMAP_MMC_MAX_CLOCK 52000000 | ||
121 | #define DRIVER_NAME "omap_hsmmc" | 126 | #define DRIVER_NAME "omap_hsmmc" |
122 | 127 | ||
123 | /* Timeouts for entering power saving states on inactivity, msec */ | ||
124 | #define OMAP_MMC_DISABLED_TIMEOUT 100 | ||
125 | #define OMAP_MMC_SLEEP_TIMEOUT 1000 | ||
126 | #define OMAP_MMC_OFF_TIMEOUT 8000 | ||
127 | |||
128 | /* | 128 | /* |
129 | * One controller can have multiple slots, like on some omap boards using | 129 | * One controller can have multiple slots, like on some omap boards using |
130 | * omap.c controller driver. Luckily this is not currently done on any known | 130 | * omap.c controller driver. Luckily this is not currently done on any known |
@@ -141,6 +141,11 @@ | |||
141 | #define OMAP_HSMMC_WRITE(base, reg, val) \ | 141 | #define OMAP_HSMMC_WRITE(base, reg, val) \ |
142 | __raw_writel((val), (base) + OMAP_HSMMC_##reg) | 142 | __raw_writel((val), (base) + OMAP_HSMMC_##reg) |
143 | 143 | ||
144 | struct omap_hsmmc_next { | ||
145 | unsigned int dma_len; | ||
146 | s32 cookie; | ||
147 | }; | ||
148 | |||
144 | struct omap_hsmmc_host { | 149 | struct omap_hsmmc_host { |
145 | struct device *dev; | 150 | struct device *dev; |
146 | struct mmc_host *mmc; | 151 | struct mmc_host *mmc; |
@@ -148,7 +153,6 @@ struct omap_hsmmc_host { | |||
148 | struct mmc_command *cmd; | 153 | struct mmc_command *cmd; |
149 | struct mmc_data *data; | 154 | struct mmc_data *data; |
150 | struct clk *fclk; | 155 | struct clk *fclk; |
151 | struct clk *iclk; | ||
152 | struct clk *dbclk; | 156 | struct clk *dbclk; |
153 | /* | 157 | /* |
154 | * vcc == configured supply | 158 | * vcc == configured supply |
@@ -184,6 +188,7 @@ struct omap_hsmmc_host { | |||
184 | int reqs_blocked; | 188 | int reqs_blocked; |
185 | int use_reg; | 189 | int use_reg; |
186 | int req_in_progress; | 190 | int req_in_progress; |
191 | struct omap_hsmmc_next next_data; | ||
187 | 192 | ||
188 | struct omap_mmc_platform_data *pdata; | 193 | struct omap_mmc_platform_data *pdata; |
189 | }; | 194 | }; |
@@ -548,6 +553,15 @@ static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata) | |||
548 | } | 553 | } |
549 | 554 | ||
550 | /* | 555 | /* |
556 | * Start clock to the card | ||
557 | */ | ||
558 | static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host) | ||
559 | { | ||
560 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
561 | OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); | ||
562 | } | ||
563 | |||
564 | /* | ||
551 | * Stop clock to the card | 565 | * Stop clock to the card |
552 | */ | 566 | */ |
553 | static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) | 567 | static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) |
@@ -584,6 +598,81 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) | |||
584 | OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); | 598 | OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); |
585 | } | 599 | } |
586 | 600 | ||
601 | /* Calculate divisor for the given clock frequency */ | ||
602 | static u16 calc_divisor(struct mmc_ios *ios) | ||
603 | { | ||
604 | u16 dsor = 0; | ||
605 | |||
606 | if (ios->clock) { | ||
607 | dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock); | ||
608 | if (dsor > 250) | ||
609 | dsor = 250; | ||
610 | } | ||
611 | |||
612 | return dsor; | ||
613 | } | ||
614 | |||
615 | static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) | ||
616 | { | ||
617 | struct mmc_ios *ios = &host->mmc->ios; | ||
618 | unsigned long regval; | ||
619 | unsigned long timeout; | ||
620 | |||
621 | dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); | ||
622 | |||
623 | omap_hsmmc_stop_clock(host); | ||
624 | |||
625 | regval = OMAP_HSMMC_READ(host->base, SYSCTL); | ||
626 | regval = regval & ~(CLKD_MASK | DTO_MASK); | ||
627 | regval = regval | (calc_divisor(ios) << 6) | (DTO << 16); | ||
628 | OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); | ||
629 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
630 | OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); | ||
631 | |||
632 | /* Wait till the ICS bit is set */ | ||
633 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | ||
634 | while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS | ||
635 | && time_before(jiffies, timeout)) | ||
636 | cpu_relax(); | ||
637 | |||
638 | omap_hsmmc_start_clock(host); | ||
639 | } | ||
640 | |||
641 | static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host) | ||
642 | { | ||
643 | struct mmc_ios *ios = &host->mmc->ios; | ||
644 | u32 con; | ||
645 | |||
646 | con = OMAP_HSMMC_READ(host->base, CON); | ||
647 | switch (ios->bus_width) { | ||
648 | case MMC_BUS_WIDTH_8: | ||
649 | OMAP_HSMMC_WRITE(host->base, CON, con | DW8); | ||
650 | break; | ||
651 | case MMC_BUS_WIDTH_4: | ||
652 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
653 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
654 | OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); | ||
655 | break; | ||
656 | case MMC_BUS_WIDTH_1: | ||
657 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
658 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
659 | OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); | ||
660 | break; | ||
661 | } | ||
662 | } | ||
663 | |||
664 | static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) | ||
665 | { | ||
666 | struct mmc_ios *ios = &host->mmc->ios; | ||
667 | u32 con; | ||
668 | |||
669 | con = OMAP_HSMMC_READ(host->base, CON); | ||
670 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
671 | OMAP_HSMMC_WRITE(host->base, CON, con | OD); | ||
672 | else | ||
673 | OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); | ||
674 | } | ||
675 | |||
587 | #ifdef CONFIG_PM | 676 | #ifdef CONFIG_PM |
588 | 677 | ||
589 | /* | 678 | /* |
@@ -595,8 +684,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) | |||
595 | struct mmc_ios *ios = &host->mmc->ios; | 684 | struct mmc_ios *ios = &host->mmc->ios; |
596 | struct omap_mmc_platform_data *pdata = host->pdata; | 685 | struct omap_mmc_platform_data *pdata = host->pdata; |
597 | int context_loss = 0; | 686 | int context_loss = 0; |
598 | u32 hctl, capa, con; | 687 | u32 hctl, capa; |
599 | u16 dsor = 0; | ||
600 | unsigned long timeout; | 688 | unsigned long timeout; |
601 | 689 | ||
602 | if (pdata->get_context_loss_count) { | 690 | if (pdata->get_context_loss_count) { |
@@ -658,54 +746,12 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) | |||
658 | if (host->power_mode == MMC_POWER_OFF) | 746 | if (host->power_mode == MMC_POWER_OFF) |
659 | goto out; | 747 | goto out; |
660 | 748 | ||
661 | con = OMAP_HSMMC_READ(host->base, CON); | 749 | omap_hsmmc_set_bus_width(host); |
662 | switch (ios->bus_width) { | ||
663 | case MMC_BUS_WIDTH_8: | ||
664 | OMAP_HSMMC_WRITE(host->base, CON, con | DW8); | ||
665 | break; | ||
666 | case MMC_BUS_WIDTH_4: | ||
667 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
668 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
669 | OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); | ||
670 | break; | ||
671 | case MMC_BUS_WIDTH_1: | ||
672 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
673 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
674 | OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | if (ios->clock) { | ||
679 | dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; | ||
680 | if (dsor < 1) | ||
681 | dsor = 1; | ||
682 | |||
683 | if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) | ||
684 | dsor++; | ||
685 | |||
686 | if (dsor > 250) | ||
687 | dsor = 250; | ||
688 | } | ||
689 | |||
690 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
691 | OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); | ||
692 | OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16)); | ||
693 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
694 | OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); | ||
695 | 750 | ||
696 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | 751 | omap_hsmmc_set_clock(host); |
697 | while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS | ||
698 | && time_before(jiffies, timeout)) | ||
699 | ; | ||
700 | 752 | ||
701 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | 753 | omap_hsmmc_set_bus_mode(host); |
702 | OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); | ||
703 | 754 | ||
704 | con = OMAP_HSMMC_READ(host->base, CON); | ||
705 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
706 | OMAP_HSMMC_WRITE(host->base, CON, con | OD); | ||
707 | else | ||
708 | OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); | ||
709 | out: | 755 | out: |
710 | host->context_loss = context_loss; | 756 | host->context_loss = context_loss; |
711 | 757 | ||
@@ -973,14 +1019,14 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) | |||
973 | * Readable error output | 1019 | * Readable error output |
974 | */ | 1020 | */ |
975 | #ifdef CONFIG_MMC_DEBUG | 1021 | #ifdef CONFIG_MMC_DEBUG |
976 | static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) | 1022 | static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) |
977 | { | 1023 | { |
978 | /* --- means reserved bit without definition at documentation */ | 1024 | /* --- means reserved bit without definition at documentation */ |
979 | static const char *omap_hsmmc_status_bits[] = { | 1025 | static const char *omap_hsmmc_status_bits[] = { |
980 | "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", | 1026 | "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" , |
981 | "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", | 1027 | "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI", |
982 | "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", | 1028 | "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" , |
983 | "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---" | 1029 | "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---" |
984 | }; | 1030 | }; |
985 | char res[256]; | 1031 | char res[256]; |
986 | char *buf = res; | 1032 | char *buf = res; |
@@ -997,6 +1043,11 @@ static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) | |||
997 | 1043 | ||
998 | dev_dbg(mmc_dev(host->mmc), "%s\n", res); | 1044 | dev_dbg(mmc_dev(host->mmc), "%s\n", res); |
999 | } | 1045 | } |
1046 | #else | ||
1047 | static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, | ||
1048 | u32 status) | ||
1049 | { | ||
1050 | } | ||
1000 | #endif /* CONFIG_MMC_DEBUG */ | 1051 | #endif /* CONFIG_MMC_DEBUG */ |
1001 | 1052 | ||
1002 | /* | 1053 | /* |
@@ -1055,9 +1106,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) | |||
1055 | dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); | 1106 | dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); |
1056 | 1107 | ||
1057 | if (status & ERR) { | 1108 | if (status & ERR) { |
1058 | #ifdef CONFIG_MMC_DEBUG | 1109 | omap_hsmmc_dbg_report_irq(host, status); |
1059 | omap_hsmmc_report_irq(host, status); | ||
1060 | #endif | ||
1061 | if ((status & CMD_TIMEOUT) || | 1110 | if ((status & CMD_TIMEOUT) || |
1062 | (status & CMD_CRC)) { | 1111 | (status & CMD_CRC)) { |
1063 | if (host->cmd) { | 1112 | if (host->cmd) { |
@@ -1155,8 +1204,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) | |||
1155 | int ret; | 1204 | int ret; |
1156 | 1205 | ||
1157 | /* Disable the clocks */ | 1206 | /* Disable the clocks */ |
1158 | clk_disable(host->fclk); | 1207 | pm_runtime_put_sync(host->dev); |
1159 | clk_disable(host->iclk); | ||
1160 | if (host->got_dbclk) | 1208 | if (host->got_dbclk) |
1161 | clk_disable(host->dbclk); | 1209 | clk_disable(host->dbclk); |
1162 | 1210 | ||
@@ -1167,8 +1215,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) | |||
1167 | if (!ret) | 1215 | if (!ret) |
1168 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, | 1216 | ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, |
1169 | vdd); | 1217 | vdd); |
1170 | clk_enable(host->iclk); | 1218 | pm_runtime_get_sync(host->dev); |
1171 | clk_enable(host->fclk); | ||
1172 | if (host->got_dbclk) | 1219 | if (host->got_dbclk) |
1173 | clk_enable(host->dbclk); | 1220 | clk_enable(host->dbclk); |
1174 | 1221 | ||
@@ -1322,7 +1369,7 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, | |||
1322 | static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | 1369 | static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) |
1323 | { | 1370 | { |
1324 | struct omap_hsmmc_host *host = cb_data; | 1371 | struct omap_hsmmc_host *host = cb_data; |
1325 | struct mmc_data *data = host->mrq->data; | 1372 | struct mmc_data *data; |
1326 | int dma_ch, req_in_progress; | 1373 | int dma_ch, req_in_progress; |
1327 | 1374 | ||
1328 | if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { | 1375 | if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { |
@@ -1337,6 +1384,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | |||
1337 | return; | 1384 | return; |
1338 | } | 1385 | } |
1339 | 1386 | ||
1387 | data = host->mrq->data; | ||
1340 | host->dma_sg_idx++; | 1388 | host->dma_sg_idx++; |
1341 | if (host->dma_sg_idx < host->dma_len) { | 1389 | if (host->dma_sg_idx < host->dma_len) { |
1342 | /* Fire up the next transfer. */ | 1390 | /* Fire up the next transfer. */ |
@@ -1346,8 +1394,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | |||
1346 | return; | 1394 | return; |
1347 | } | 1395 | } |
1348 | 1396 | ||
1349 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, | 1397 | if (!data->host_cookie) |
1350 | omap_hsmmc_get_dma_dir(host, data)); | 1398 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
1399 | omap_hsmmc_get_dma_dir(host, data)); | ||
1351 | 1400 | ||
1352 | req_in_progress = host->req_in_progress; | 1401 | req_in_progress = host->req_in_progress; |
1353 | dma_ch = host->dma_ch; | 1402 | dma_ch = host->dma_ch; |
@@ -1365,6 +1414,45 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | |||
1365 | } | 1414 | } |
1366 | } | 1415 | } |
1367 | 1416 | ||
1417 | static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, | ||
1418 | struct mmc_data *data, | ||
1419 | struct omap_hsmmc_next *next) | ||
1420 | { | ||
1421 | int dma_len; | ||
1422 | |||
1423 | if (!next && data->host_cookie && | ||
1424 | data->host_cookie != host->next_data.cookie) { | ||
1425 | printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d" | ||
1426 | " host->next_data.cookie %d\n", | ||
1427 | __func__, data->host_cookie, host->next_data.cookie); | ||
1428 | data->host_cookie = 0; | ||
1429 | } | ||
1430 | |||
1431 | /* Check if next job is already prepared */ | ||
1432 | if (next || | ||
1433 | (!next && data->host_cookie != host->next_data.cookie)) { | ||
1434 | dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
1435 | data->sg_len, | ||
1436 | omap_hsmmc_get_dma_dir(host, data)); | ||
1437 | |||
1438 | } else { | ||
1439 | dma_len = host->next_data.dma_len; | ||
1440 | host->next_data.dma_len = 0; | ||
1441 | } | ||
1442 | |||
1443 | |||
1444 | if (dma_len == 0) | ||
1445 | return -EINVAL; | ||
1446 | |||
1447 | if (next) { | ||
1448 | next->dma_len = dma_len; | ||
1449 | data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie; | ||
1450 | } else | ||
1451 | host->dma_len = dma_len; | ||
1452 | |||
1453 | return 0; | ||
1454 | } | ||
1455 | |||
1368 | /* | 1456 | /* |
1369 | * Routine to configure and start DMA for the MMC card | 1457 | * Routine to configure and start DMA for the MMC card |
1370 | */ | 1458 | */ |
@@ -1398,9 +1486,10 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, | |||
1398 | mmc_hostname(host->mmc), ret); | 1486 | mmc_hostname(host->mmc), ret); |
1399 | return ret; | 1487 | return ret; |
1400 | } | 1488 | } |
1489 | ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); | ||
1490 | if (ret) | ||
1491 | return ret; | ||
1401 | 1492 | ||
1402 | host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, | ||
1403 | data->sg_len, omap_hsmmc_get_dma_dir(host, data)); | ||
1404 | host->dma_ch = dma_ch; | 1493 | host->dma_ch = dma_ch; |
1405 | host->dma_sg_idx = 0; | 1494 | host->dma_sg_idx = 0; |
1406 | 1495 | ||
@@ -1480,6 +1569,35 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) | |||
1480 | return 0; | 1569 | return 0; |
1481 | } | 1570 | } |
1482 | 1571 | ||
1572 | static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, | ||
1573 | int err) | ||
1574 | { | ||
1575 | struct omap_hsmmc_host *host = mmc_priv(mmc); | ||
1576 | struct mmc_data *data = mrq->data; | ||
1577 | |||
1578 | if (host->use_dma) { | ||
1579 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, | ||
1580 | omap_hsmmc_get_dma_dir(host, data)); | ||
1581 | data->host_cookie = 0; | ||
1582 | } | ||
1583 | } | ||
1584 | |||
1585 | static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, | ||
1586 | bool is_first_req) | ||
1587 | { | ||
1588 | struct omap_hsmmc_host *host = mmc_priv(mmc); | ||
1589 | |||
1590 | if (mrq->data->host_cookie) { | ||
1591 | mrq->data->host_cookie = 0; | ||
1592 | return ; | ||
1593 | } | ||
1594 | |||
1595 | if (host->use_dma) | ||
1596 | if (omap_hsmmc_pre_dma_transfer(host, mrq->data, | ||
1597 | &host->next_data)) | ||
1598 | mrq->data->host_cookie = 0; | ||
1599 | } | ||
1600 | |||
1483 | /* | 1601 | /* |
1484 | * Request function. for read/write operation | 1602 | * Request function. for read/write operation |
1485 | */ | 1603 | */ |
@@ -1528,13 +1646,9 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) | |||
1528 | static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 1646 | static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
1529 | { | 1647 | { |
1530 | struct omap_hsmmc_host *host = mmc_priv(mmc); | 1648 | struct omap_hsmmc_host *host = mmc_priv(mmc); |
1531 | u16 dsor = 0; | ||
1532 | unsigned long regval; | ||
1533 | unsigned long timeout; | ||
1534 | u32 con; | ||
1535 | int do_send_init_stream = 0; | 1649 | int do_send_init_stream = 0; |
1536 | 1650 | ||
1537 | mmc_host_enable(host->mmc); | 1651 | pm_runtime_get_sync(host->dev); |
1538 | 1652 | ||
1539 | if (ios->power_mode != host->power_mode) { | 1653 | if (ios->power_mode != host->power_mode) { |
1540 | switch (ios->power_mode) { | 1654 | switch (ios->power_mode) { |
@@ -1557,22 +1671,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1557 | 1671 | ||
1558 | /* FIXME: set registers based only on changes to ios */ | 1672 | /* FIXME: set registers based only on changes to ios */ |
1559 | 1673 | ||
1560 | con = OMAP_HSMMC_READ(host->base, CON); | 1674 | omap_hsmmc_set_bus_width(host); |
1561 | switch (mmc->ios.bus_width) { | ||
1562 | case MMC_BUS_WIDTH_8: | ||
1563 | OMAP_HSMMC_WRITE(host->base, CON, con | DW8); | ||
1564 | break; | ||
1565 | case MMC_BUS_WIDTH_4: | ||
1566 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
1567 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
1568 | OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); | ||
1569 | break; | ||
1570 | case MMC_BUS_WIDTH_1: | ||
1571 | OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); | ||
1572 | OMAP_HSMMC_WRITE(host->base, HCTL, | ||
1573 | OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); | ||
1574 | break; | ||
1575 | } | ||
1576 | 1675 | ||
1577 | if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { | 1676 | if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { |
1578 | /* Only MMC1 can interface at 3V without some flavor | 1677 | /* Only MMC1 can interface at 3V without some flavor |
@@ -1592,47 +1691,14 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1592 | } | 1691 | } |
1593 | } | 1692 | } |
1594 | 1693 | ||
1595 | if (ios->clock) { | 1694 | omap_hsmmc_set_clock(host); |
1596 | dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; | ||
1597 | if (dsor < 1) | ||
1598 | dsor = 1; | ||
1599 | |||
1600 | if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) | ||
1601 | dsor++; | ||
1602 | |||
1603 | if (dsor > 250) | ||
1604 | dsor = 250; | ||
1605 | } | ||
1606 | omap_hsmmc_stop_clock(host); | ||
1607 | regval = OMAP_HSMMC_READ(host->base, SYSCTL); | ||
1608 | regval = regval & ~(CLKD_MASK); | ||
1609 | regval = regval | (dsor << 6) | (DTO << 16); | ||
1610 | OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); | ||
1611 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
1612 | OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); | ||
1613 | |||
1614 | /* Wait till the ICS bit is set */ | ||
1615 | timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); | ||
1616 | while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS | ||
1617 | && time_before(jiffies, timeout)) | ||
1618 | msleep(1); | ||
1619 | |||
1620 | OMAP_HSMMC_WRITE(host->base, SYSCTL, | ||
1621 | OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); | ||
1622 | 1695 | ||
1623 | if (do_send_init_stream) | 1696 | if (do_send_init_stream) |
1624 | send_init_stream(host); | 1697 | send_init_stream(host); |
1625 | 1698 | ||
1626 | con = OMAP_HSMMC_READ(host->base, CON); | 1699 | omap_hsmmc_set_bus_mode(host); |
1627 | if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) | ||
1628 | OMAP_HSMMC_WRITE(host->base, CON, con | OD); | ||
1629 | else | ||
1630 | OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); | ||
1631 | 1700 | ||
1632 | if (host->power_mode == MMC_POWER_OFF) | 1701 | pm_runtime_put_autosuspend(host->dev); |
1633 | mmc_host_disable(host->mmc); | ||
1634 | else | ||
1635 | mmc_host_lazy_disable(host->mmc); | ||
1636 | } | 1702 | } |
1637 | 1703 | ||
1638 | static int omap_hsmmc_get_cd(struct mmc_host *mmc) | 1704 | static int omap_hsmmc_get_cd(struct mmc_host *mmc) |
@@ -1688,230 +1754,12 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) | |||
1688 | set_sd_bus_power(host); | 1754 | set_sd_bus_power(host); |
1689 | } | 1755 | } |
1690 | 1756 | ||
1691 | /* | ||
1692 | * Dynamic power saving handling, FSM: | ||
1693 | * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF | ||
1694 | * ^___________| | | | ||
1695 | * |______________________|______________________| | ||
1696 | * | ||
1697 | * ENABLED: mmc host is fully functional | ||
1698 | * DISABLED: fclk is off | ||
1699 | * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep | ||
1700 | * REGSLEEP: fclk is off, voltage regulator is asleep | ||
1701 | * OFF: fclk is off, voltage regulator is off | ||
1702 | * | ||
1703 | * Transition handlers return the timeout for the next state transition | ||
1704 | * or negative error. | ||
1705 | */ | ||
1706 | |||
1707 | enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF}; | ||
1708 | |||
1709 | /* Handler for [ENABLED -> DISABLED] transition */ | ||
1710 | static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host) | ||
1711 | { | ||
1712 | omap_hsmmc_context_save(host); | ||
1713 | clk_disable(host->fclk); | ||
1714 | host->dpm_state = DISABLED; | ||
1715 | |||
1716 | dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n"); | ||
1717 | |||
1718 | if (host->power_mode == MMC_POWER_OFF) | ||
1719 | return 0; | ||
1720 | |||
1721 | return OMAP_MMC_SLEEP_TIMEOUT; | ||
1722 | } | ||
1723 | |||
1724 | /* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */ | ||
1725 | static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host) | ||
1726 | { | ||
1727 | int err, new_state; | ||
1728 | |||
1729 | if (!mmc_try_claim_host(host->mmc)) | ||
1730 | return 0; | ||
1731 | |||
1732 | clk_enable(host->fclk); | ||
1733 | omap_hsmmc_context_restore(host); | ||
1734 | if (mmc_card_can_sleep(host->mmc)) { | ||
1735 | err = mmc_card_sleep(host->mmc); | ||
1736 | if (err < 0) { | ||
1737 | clk_disable(host->fclk); | ||
1738 | mmc_release_host(host->mmc); | ||
1739 | return err; | ||
1740 | } | ||
1741 | new_state = CARDSLEEP; | ||
1742 | } else { | ||
1743 | new_state = REGSLEEP; | ||
1744 | } | ||
1745 | if (mmc_slot(host).set_sleep) | ||
1746 | mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0, | ||
1747 | new_state == CARDSLEEP); | ||
1748 | /* FIXME: turn off bus power and perhaps interrupts too */ | ||
1749 | clk_disable(host->fclk); | ||
1750 | host->dpm_state = new_state; | ||
1751 | |||
1752 | mmc_release_host(host->mmc); | ||
1753 | |||
1754 | dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n", | ||
1755 | host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); | ||
1756 | |||
1757 | if (mmc_slot(host).no_off) | ||
1758 | return 0; | ||
1759 | |||
1760 | if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) || | ||
1761 | mmc_slot(host).card_detect || | ||
1762 | (mmc_slot(host).get_cover_state && | ||
1763 | mmc_slot(host).get_cover_state(host->dev, host->slot_id))) | ||
1764 | return OMAP_MMC_OFF_TIMEOUT; | ||
1765 | |||
1766 | return 0; | ||
1767 | } | ||
1768 | |||
1769 | /* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */ | ||
1770 | static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host) | ||
1771 | { | ||
1772 | if (!mmc_try_claim_host(host->mmc)) | ||
1773 | return 0; | ||
1774 | |||
1775 | if (mmc_slot(host).no_off) | ||
1776 | return 0; | ||
1777 | |||
1778 | if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) || | ||
1779 | mmc_slot(host).card_detect || | ||
1780 | (mmc_slot(host).get_cover_state && | ||
1781 | mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) { | ||
1782 | mmc_release_host(host->mmc); | ||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1786 | mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); | ||
1787 | host->vdd = 0; | ||
1788 | host->power_mode = MMC_POWER_OFF; | ||
1789 | |||
1790 | dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n", | ||
1791 | host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); | ||
1792 | |||
1793 | host->dpm_state = OFF; | ||
1794 | |||
1795 | mmc_release_host(host->mmc); | ||
1796 | |||
1797 | return 0; | ||
1798 | } | ||
1799 | |||
1800 | /* Handler for [DISABLED -> ENABLED] transition */ | ||
1801 | static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host) | ||
1802 | { | ||
1803 | int err; | ||
1804 | |||
1805 | err = clk_enable(host->fclk); | ||
1806 | if (err < 0) | ||
1807 | return err; | ||
1808 | |||
1809 | omap_hsmmc_context_restore(host); | ||
1810 | host->dpm_state = ENABLED; | ||
1811 | |||
1812 | dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n"); | ||
1813 | |||
1814 | return 0; | ||
1815 | } | ||
1816 | |||
1817 | /* Handler for [SLEEP -> ENABLED] transition */ | ||
1818 | static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host) | ||
1819 | { | ||
1820 | if (!mmc_try_claim_host(host->mmc)) | ||
1821 | return 0; | ||
1822 | |||
1823 | clk_enable(host->fclk); | ||
1824 | omap_hsmmc_context_restore(host); | ||
1825 | if (mmc_slot(host).set_sleep) | ||
1826 | mmc_slot(host).set_sleep(host->dev, host->slot_id, 0, | ||
1827 | host->vdd, host->dpm_state == CARDSLEEP); | ||
1828 | if (mmc_card_can_sleep(host->mmc)) | ||
1829 | mmc_card_awake(host->mmc); | ||
1830 | |||
1831 | dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n", | ||
1832 | host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); | ||
1833 | |||
1834 | host->dpm_state = ENABLED; | ||
1835 | |||
1836 | mmc_release_host(host->mmc); | ||
1837 | |||
1838 | return 0; | ||
1839 | } | ||
1840 | |||
1841 | /* Handler for [OFF -> ENABLED] transition */ | ||
1842 | static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host) | ||
1843 | { | ||
1844 | clk_enable(host->fclk); | ||
1845 | |||
1846 | omap_hsmmc_context_restore(host); | ||
1847 | omap_hsmmc_conf_bus_power(host); | ||
1848 | mmc_power_restore_host(host->mmc); | ||
1849 | |||
1850 | host->dpm_state = ENABLED; | ||
1851 | |||
1852 | dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n"); | ||
1853 | |||
1854 | return 0; | ||
1855 | } | ||
1856 | |||
1857 | /* | ||
1858 | * Bring MMC host to ENABLED from any other PM state. | ||
1859 | */ | ||
1860 | static int omap_hsmmc_enable(struct mmc_host *mmc) | ||
1861 | { | ||
1862 | struct omap_hsmmc_host *host = mmc_priv(mmc); | ||
1863 | |||
1864 | switch (host->dpm_state) { | ||
1865 | case DISABLED: | ||
1866 | return omap_hsmmc_disabled_to_enabled(host); | ||
1867 | case CARDSLEEP: | ||
1868 | case REGSLEEP: | ||
1869 | return omap_hsmmc_sleep_to_enabled(host); | ||
1870 | case OFF: | ||
1871 | return omap_hsmmc_off_to_enabled(host); | ||
1872 | default: | ||
1873 | dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); | ||
1874 | return -EINVAL; | ||
1875 | } | ||
1876 | } | ||
1877 | |||
1878 | /* | ||
1879 | * Bring MMC host in PM state (one level deeper). | ||
1880 | */ | ||
1881 | static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy) | ||
1882 | { | ||
1883 | struct omap_hsmmc_host *host = mmc_priv(mmc); | ||
1884 | |||
1885 | switch (host->dpm_state) { | ||
1886 | case ENABLED: { | ||
1887 | int delay; | ||
1888 | |||
1889 | delay = omap_hsmmc_enabled_to_disabled(host); | ||
1890 | if (lazy || delay < 0) | ||
1891 | return delay; | ||
1892 | return 0; | ||
1893 | } | ||
1894 | case DISABLED: | ||
1895 | return omap_hsmmc_disabled_to_sleep(host); | ||
1896 | case CARDSLEEP: | ||
1897 | case REGSLEEP: | ||
1898 | return omap_hsmmc_sleep_to_off(host); | ||
1899 | default: | ||
1900 | dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); | ||
1901 | return -EINVAL; | ||
1902 | } | ||
1903 | } | ||
1904 | |||
1905 | static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) | 1757 | static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) |
1906 | { | 1758 | { |
1907 | struct omap_hsmmc_host *host = mmc_priv(mmc); | 1759 | struct omap_hsmmc_host *host = mmc_priv(mmc); |
1908 | int err; | ||
1909 | 1760 | ||
1910 | err = clk_enable(host->fclk); | 1761 | pm_runtime_get_sync(host->dev); |
1911 | if (err) | 1762 | |
1912 | return err; | ||
1913 | dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n"); | ||
1914 | omap_hsmmc_context_restore(host); | ||
1915 | return 0; | 1763 | return 0; |
1916 | } | 1764 | } |
1917 | 1765 | ||
@@ -1919,26 +1767,17 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) | |||
1919 | { | 1767 | { |
1920 | struct omap_hsmmc_host *host = mmc_priv(mmc); | 1768 | struct omap_hsmmc_host *host = mmc_priv(mmc); |
1921 | 1769 | ||
1922 | omap_hsmmc_context_save(host); | 1770 | pm_runtime_mark_last_busy(host->dev); |
1923 | clk_disable(host->fclk); | 1771 | pm_runtime_put_autosuspend(host->dev); |
1924 | dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); | 1772 | |
1925 | return 0; | 1773 | return 0; |
1926 | } | 1774 | } |
1927 | 1775 | ||
1928 | static const struct mmc_host_ops omap_hsmmc_ops = { | 1776 | static const struct mmc_host_ops omap_hsmmc_ops = { |
1929 | .enable = omap_hsmmc_enable_fclk, | 1777 | .enable = omap_hsmmc_enable_fclk, |
1930 | .disable = omap_hsmmc_disable_fclk, | 1778 | .disable = omap_hsmmc_disable_fclk, |
1931 | .request = omap_hsmmc_request, | 1779 | .post_req = omap_hsmmc_post_req, |
1932 | .set_ios = omap_hsmmc_set_ios, | 1780 | .pre_req = omap_hsmmc_pre_req, |
1933 | .get_cd = omap_hsmmc_get_cd, | ||
1934 | .get_ro = omap_hsmmc_get_ro, | ||
1935 | .init_card = omap_hsmmc_init_card, | ||
1936 | /* NYET -- enable_sdio_irq */ | ||
1937 | }; | ||
1938 | |||
1939 | static const struct mmc_host_ops omap_hsmmc_ps_ops = { | ||
1940 | .enable = omap_hsmmc_enable, | ||
1941 | .disable = omap_hsmmc_disable, | ||
1942 | .request = omap_hsmmc_request, | 1781 | .request = omap_hsmmc_request, |
1943 | .set_ios = omap_hsmmc_set_ios, | 1782 | .set_ios = omap_hsmmc_set_ios, |
1944 | .get_cd = omap_hsmmc_get_cd, | 1783 | .get_cd = omap_hsmmc_get_cd, |
@@ -1968,15 +1807,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) | |||
1968 | host->dpm_state, mmc->nesting_cnt, | 1807 | host->dpm_state, mmc->nesting_cnt, |
1969 | host->context_loss, context_loss); | 1808 | host->context_loss, context_loss); |
1970 | 1809 | ||
1971 | if (host->suspended || host->dpm_state == OFF) { | 1810 | if (host->suspended) { |
1972 | seq_printf(s, "host suspended, can't read registers\n"); | 1811 | seq_printf(s, "host suspended, can't read registers\n"); |
1973 | return 0; | 1812 | return 0; |
1974 | } | 1813 | } |
1975 | 1814 | ||
1976 | if (clk_enable(host->fclk) != 0) { | 1815 | pm_runtime_get_sync(host->dev); |
1977 | seq_printf(s, "can't read the regs\n"); | ||
1978 | return 0; | ||
1979 | } | ||
1980 | 1816 | ||
1981 | seq_printf(s, "SYSCONFIG:\t0x%08x\n", | 1817 | seq_printf(s, "SYSCONFIG:\t0x%08x\n", |
1982 | OMAP_HSMMC_READ(host->base, SYSCONFIG)); | 1818 | OMAP_HSMMC_READ(host->base, SYSCONFIG)); |
@@ -1993,7 +1829,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) | |||
1993 | seq_printf(s, "CAPA:\t\t0x%08x\n", | 1829 | seq_printf(s, "CAPA:\t\t0x%08x\n", |
1994 | OMAP_HSMMC_READ(host->base, CAPA)); | 1830 | OMAP_HSMMC_READ(host->base, CAPA)); |
1995 | 1831 | ||
1996 | clk_disable(host->fclk); | 1832 | pm_runtime_mark_last_busy(host->dev); |
1833 | pm_runtime_put_autosuspend(host->dev); | ||
1997 | 1834 | ||
1998 | return 0; | 1835 | return 0; |
1999 | } | 1836 | } |
@@ -2077,14 +1914,12 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) | |||
2077 | host->mapbase = res->start; | 1914 | host->mapbase = res->start; |
2078 | host->base = ioremap(host->mapbase, SZ_4K); | 1915 | host->base = ioremap(host->mapbase, SZ_4K); |
2079 | host->power_mode = MMC_POWER_OFF; | 1916 | host->power_mode = MMC_POWER_OFF; |
1917 | host->next_data.cookie = 1; | ||
2080 | 1918 | ||
2081 | platform_set_drvdata(pdev, host); | 1919 | platform_set_drvdata(pdev, host); |
2082 | INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); | 1920 | INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); |
2083 | 1921 | ||
2084 | if (mmc_slot(host).power_saving) | 1922 | mmc->ops = &omap_hsmmc_ops; |
2085 | mmc->ops = &omap_hsmmc_ps_ops; | ||
2086 | else | ||
2087 | mmc->ops = &omap_hsmmc_ops; | ||
2088 | 1923 | ||
2089 | /* | 1924 | /* |
2090 | * If regulator_disable can only put vcc_aux to sleep then there is | 1925 | * If regulator_disable can only put vcc_aux to sleep then there is |
@@ -2093,44 +1928,26 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) | |||
2093 | if (mmc_slot(host).vcc_aux_disable_is_sleep) | 1928 | if (mmc_slot(host).vcc_aux_disable_is_sleep) |
2094 | mmc_slot(host).no_off = 1; | 1929 | mmc_slot(host).no_off = 1; |
2095 | 1930 | ||
2096 | mmc->f_min = 400000; | 1931 | mmc->f_min = OMAP_MMC_MIN_CLOCK; |
2097 | mmc->f_max = 52000000; | 1932 | mmc->f_max = OMAP_MMC_MAX_CLOCK; |
2098 | 1933 | ||
2099 | spin_lock_init(&host->irq_lock); | 1934 | spin_lock_init(&host->irq_lock); |
2100 | 1935 | ||
2101 | host->iclk = clk_get(&pdev->dev, "ick"); | ||
2102 | if (IS_ERR(host->iclk)) { | ||
2103 | ret = PTR_ERR(host->iclk); | ||
2104 | host->iclk = NULL; | ||
2105 | goto err1; | ||
2106 | } | ||
2107 | host->fclk = clk_get(&pdev->dev, "fck"); | 1936 | host->fclk = clk_get(&pdev->dev, "fck"); |
2108 | if (IS_ERR(host->fclk)) { | 1937 | if (IS_ERR(host->fclk)) { |
2109 | ret = PTR_ERR(host->fclk); | 1938 | ret = PTR_ERR(host->fclk); |
2110 | host->fclk = NULL; | 1939 | host->fclk = NULL; |
2111 | clk_put(host->iclk); | ||
2112 | goto err1; | 1940 | goto err1; |
2113 | } | 1941 | } |
2114 | 1942 | ||
2115 | omap_hsmmc_context_save(host); | 1943 | omap_hsmmc_context_save(host); |
2116 | 1944 | ||
2117 | mmc->caps |= MMC_CAP_DISABLE; | 1945 | mmc->caps |= MMC_CAP_DISABLE; |
2118 | mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT); | ||
2119 | /* we start off in DISABLED state */ | ||
2120 | host->dpm_state = DISABLED; | ||
2121 | 1946 | ||
2122 | if (clk_enable(host->iclk) != 0) { | 1947 | pm_runtime_enable(host->dev); |
2123 | clk_put(host->iclk); | 1948 | pm_runtime_get_sync(host->dev); |
2124 | clk_put(host->fclk); | 1949 | pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); |
2125 | goto err1; | 1950 | pm_runtime_use_autosuspend(host->dev); |
2126 | } | ||
2127 | |||
2128 | if (mmc_host_enable(host->mmc) != 0) { | ||
2129 | clk_disable(host->iclk); | ||
2130 | clk_put(host->iclk); | ||
2131 | clk_put(host->fclk); | ||
2132 | goto err1; | ||
2133 | } | ||
2134 | 1951 | ||
2135 | if (cpu_is_omap2430()) { | 1952 | if (cpu_is_omap2430()) { |
2136 | host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); | 1953 | host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); |
@@ -2240,8 +2057,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) | |||
2240 | 2057 | ||
2241 | omap_hsmmc_disable_irq(host); | 2058 | omap_hsmmc_disable_irq(host); |
2242 | 2059 | ||
2243 | mmc_host_lazy_disable(host->mmc); | ||
2244 | |||
2245 | omap_hsmmc_protect_card(host); | 2060 | omap_hsmmc_protect_card(host); |
2246 | 2061 | ||
2247 | mmc_add_host(mmc); | 2062 | mmc_add_host(mmc); |
@@ -2259,6 +2074,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) | |||
2259 | } | 2074 | } |
2260 | 2075 | ||
2261 | omap_hsmmc_debugfs(mmc); | 2076 | omap_hsmmc_debugfs(mmc); |
2077 | pm_runtime_mark_last_busy(host->dev); | ||
2078 | pm_runtime_put_autosuspend(host->dev); | ||
2262 | 2079 | ||
2263 | return 0; | 2080 | return 0; |
2264 | 2081 | ||
@@ -2274,10 +2091,9 @@ err_reg: | |||
2274 | err_irq_cd_init: | 2091 | err_irq_cd_init: |
2275 | free_irq(host->irq, host); | 2092 | free_irq(host->irq, host); |
2276 | err_irq: | 2093 | err_irq: |
2277 | mmc_host_disable(host->mmc); | 2094 | pm_runtime_mark_last_busy(host->dev); |
2278 | clk_disable(host->iclk); | 2095 | pm_runtime_put_autosuspend(host->dev); |
2279 | clk_put(host->fclk); | 2096 | clk_put(host->fclk); |
2280 | clk_put(host->iclk); | ||
2281 | if (host->got_dbclk) { | 2097 | if (host->got_dbclk) { |
2282 | clk_disable(host->dbclk); | 2098 | clk_disable(host->dbclk); |
2283 | clk_put(host->dbclk); | 2099 | clk_put(host->dbclk); |
@@ -2299,7 +2115,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) | |||
2299 | struct resource *res; | 2115 | struct resource *res; |
2300 | 2116 | ||
2301 | if (host) { | 2117 | if (host) { |
2302 | mmc_host_enable(host->mmc); | 2118 | pm_runtime_get_sync(host->dev); |
2303 | mmc_remove_host(host->mmc); | 2119 | mmc_remove_host(host->mmc); |
2304 | if (host->use_reg) | 2120 | if (host->use_reg) |
2305 | omap_hsmmc_reg_put(host); | 2121 | omap_hsmmc_reg_put(host); |
@@ -2310,10 +2126,9 @@ static int omap_hsmmc_remove(struct platform_device *pdev) | |||
2310 | free_irq(mmc_slot(host).card_detect_irq, host); | 2126 | free_irq(mmc_slot(host).card_detect_irq, host); |
2311 | flush_work_sync(&host->mmc_carddetect_work); | 2127 | flush_work_sync(&host->mmc_carddetect_work); |
2312 | 2128 | ||
2313 | mmc_host_disable(host->mmc); | 2129 | pm_runtime_put_sync(host->dev); |
2314 | clk_disable(host->iclk); | 2130 | pm_runtime_disable(host->dev); |
2315 | clk_put(host->fclk); | 2131 | clk_put(host->fclk); |
2316 | clk_put(host->iclk); | ||
2317 | if (host->got_dbclk) { | 2132 | if (host->got_dbclk) { |
2318 | clk_disable(host->dbclk); | 2133 | clk_disable(host->dbclk); |
2319 | clk_put(host->dbclk); | 2134 | clk_put(host->dbclk); |
@@ -2343,6 +2158,7 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
2343 | return 0; | 2158 | return 0; |
2344 | 2159 | ||
2345 | if (host) { | 2160 | if (host) { |
2161 | pm_runtime_get_sync(host->dev); | ||
2346 | host->suspended = 1; | 2162 | host->suspended = 1; |
2347 | if (host->pdata->suspend) { | 2163 | if (host->pdata->suspend) { |
2348 | ret = host->pdata->suspend(&pdev->dev, | 2164 | ret = host->pdata->suspend(&pdev->dev, |
@@ -2357,13 +2173,11 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
2357 | } | 2173 | } |
2358 | cancel_work_sync(&host->mmc_carddetect_work); | 2174 | cancel_work_sync(&host->mmc_carddetect_work); |
2359 | ret = mmc_suspend_host(host->mmc); | 2175 | ret = mmc_suspend_host(host->mmc); |
2360 | mmc_host_enable(host->mmc); | 2176 | |
2361 | if (ret == 0) { | 2177 | if (ret == 0) { |
2362 | omap_hsmmc_disable_irq(host); | 2178 | omap_hsmmc_disable_irq(host); |
2363 | OMAP_HSMMC_WRITE(host->base, HCTL, | 2179 | OMAP_HSMMC_WRITE(host->base, HCTL, |
2364 | OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); | 2180 | OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); |
2365 | mmc_host_disable(host->mmc); | ||
2366 | clk_disable(host->iclk); | ||
2367 | if (host->got_dbclk) | 2181 | if (host->got_dbclk) |
2368 | clk_disable(host->dbclk); | 2182 | clk_disable(host->dbclk); |
2369 | } else { | 2183 | } else { |
@@ -2375,9 +2189,8 @@ static int omap_hsmmc_suspend(struct device *dev) | |||
2375 | dev_dbg(mmc_dev(host->mmc), | 2189 | dev_dbg(mmc_dev(host->mmc), |
2376 | "Unmask interrupt failed\n"); | 2190 | "Unmask interrupt failed\n"); |
2377 | } | 2191 | } |
2378 | mmc_host_disable(host->mmc); | ||
2379 | } | 2192 | } |
2380 | 2193 | pm_runtime_put_sync(host->dev); | |
2381 | } | 2194 | } |
2382 | return ret; | 2195 | return ret; |
2383 | } | 2196 | } |
@@ -2393,14 +2206,7 @@ static int omap_hsmmc_resume(struct device *dev) | |||
2393 | return 0; | 2206 | return 0; |
2394 | 2207 | ||
2395 | if (host) { | 2208 | if (host) { |
2396 | ret = clk_enable(host->iclk); | 2209 | pm_runtime_get_sync(host->dev); |
2397 | if (ret) | ||
2398 | goto clk_en_err; | ||
2399 | |||
2400 | if (mmc_host_enable(host->mmc) != 0) { | ||
2401 | clk_disable(host->iclk); | ||
2402 | goto clk_en_err; | ||
2403 | } | ||
2404 | 2210 | ||
2405 | if (host->got_dbclk) | 2211 | if (host->got_dbclk) |
2406 | clk_enable(host->dbclk); | 2212 | clk_enable(host->dbclk); |
@@ -2421,15 +2227,12 @@ static int omap_hsmmc_resume(struct device *dev) | |||
2421 | if (ret == 0) | 2227 | if (ret == 0) |
2422 | host->suspended = 0; | 2228 | host->suspended = 0; |
2423 | 2229 | ||
2424 | mmc_host_lazy_disable(host->mmc); | 2230 | pm_runtime_mark_last_busy(host->dev); |
2231 | pm_runtime_put_autosuspend(host->dev); | ||
2425 | } | 2232 | } |
2426 | 2233 | ||
2427 | return ret; | 2234 | return ret; |
2428 | 2235 | ||
2429 | clk_en_err: | ||
2430 | dev_dbg(mmc_dev(host->mmc), | ||
2431 | "Failed to enable MMC clocks during resume\n"); | ||
2432 | return ret; | ||
2433 | } | 2236 | } |
2434 | 2237 | ||
2435 | #else | 2238 | #else |
@@ -2437,9 +2240,33 @@ clk_en_err: | |||
2437 | #define omap_hsmmc_resume NULL | 2240 | #define omap_hsmmc_resume NULL |
2438 | #endif | 2241 | #endif |
2439 | 2242 | ||
2243 | static int omap_hsmmc_runtime_suspend(struct device *dev) | ||
2244 | { | ||
2245 | struct omap_hsmmc_host *host; | ||
2246 | |||
2247 | host = platform_get_drvdata(to_platform_device(dev)); | ||
2248 | omap_hsmmc_context_save(host); | ||
2249 | dev_dbg(mmc_dev(host->mmc), "disabled\n"); | ||
2250 | |||
2251 | return 0; | ||
2252 | } | ||
2253 | |||
2254 | static int omap_hsmmc_runtime_resume(struct device *dev) | ||
2255 | { | ||
2256 | struct omap_hsmmc_host *host; | ||
2257 | |||
2258 | host = platform_get_drvdata(to_platform_device(dev)); | ||
2259 | omap_hsmmc_context_restore(host); | ||
2260 | dev_dbg(mmc_dev(host->mmc), "enabled\n"); | ||
2261 | |||
2262 | return 0; | ||
2263 | } | ||
2264 | |||
2440 | static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { | 2265 | static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { |
2441 | .suspend = omap_hsmmc_suspend, | 2266 | .suspend = omap_hsmmc_suspend, |
2442 | .resume = omap_hsmmc_resume, | 2267 | .resume = omap_hsmmc_resume, |
2268 | .runtime_suspend = omap_hsmmc_runtime_suspend, | ||
2269 | .runtime_resume = omap_hsmmc_runtime_resume, | ||
2443 | }; | 2270 | }; |
2444 | 2271 | ||
2445 | static struct platform_driver omap_hsmmc_driver = { | 2272 | static struct platform_driver omap_hsmmc_driver = { |
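The omap_hsmmc changes above drop the driver-private ENABLED/DISABLED/CARDSLEEP/REGSLEEP/OFF state machine and the manual iclk/fclk handling in favour of runtime PM with autosuspend. A minimal sketch of that pattern for a hypothetical platform driver follows; the pm_runtime_* calls are the standard linux/pm_runtime.h API, while the foo_* names and the delay value are placeholders, not part of this patch:

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define FOO_AUTOSUSPEND_DELAY	100	/* msec, placeholder */

static int foo_probe(struct platform_device *pdev)
{
	/* ... map registers, request IRQ ... */

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);		/* power up for init */
	pm_runtime_set_autosuspend_delay(&pdev->dev, FOO_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);

	/* ... initialise the controller ... */

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);	/* goes idle after the delay */
	return 0;
}

static void foo_do_io(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* runtime_resume restores context */

	/* ... touch the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* runtime_suspend saves context */
}

The omap_hsmmc_runtime_suspend/resume callbacks added near the end of the file then take over the context save/restore that the old dpm_state transition handlers performed by hand.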
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c index 9ebd1d7759dc..4b920b7621cf 100644 --- a/drivers/mmc/host/sdhci-cns3xxx.c +++ b/drivers/mmc/host/sdhci-cns3xxx.c | |||
@@ -15,9 +15,7 @@ | |||
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
18 | #include <linux/mmc/sdhci-pltfm.h> | ||
19 | #include <mach/cns3xxx.h> | 18 | #include <mach/cns3xxx.h> |
20 | #include "sdhci.h" | ||
21 | #include "sdhci-pltfm.h" | 19 | #include "sdhci-pltfm.h" |
22 | 20 | ||
23 | static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) | 21 | static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) |
@@ -86,7 +84,7 @@ static struct sdhci_ops sdhci_cns3xxx_ops = { | |||
86 | .set_clock = sdhci_cns3xxx_set_clock, | 84 | .set_clock = sdhci_cns3xxx_set_clock, |
87 | }; | 85 | }; |
88 | 86 | ||
89 | struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { | 87 | static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { |
90 | .ops = &sdhci_cns3xxx_ops, | 88 | .ops = &sdhci_cns3xxx_ops, |
91 | .quirks = SDHCI_QUIRK_BROKEN_DMA | | 89 | .quirks = SDHCI_QUIRK_BROKEN_DMA | |
92 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | | 90 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | |
@@ -95,3 +93,43 @@ struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { | |||
95 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 93 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | |
96 | SDHCI_QUIRK_NONSTANDARD_CLOCK, | 94 | SDHCI_QUIRK_NONSTANDARD_CLOCK, |
97 | }; | 95 | }; |
96 | |||
97 | static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) | ||
98 | { | ||
99 | return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata); | ||
100 | } | ||
101 | |||
102 | static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev) | ||
103 | { | ||
104 | return sdhci_pltfm_unregister(pdev); | ||
105 | } | ||
106 | |||
107 | static struct platform_driver sdhci_cns3xxx_driver = { | ||
108 | .driver = { | ||
109 | .name = "sdhci-cns3xxx", | ||
110 | .owner = THIS_MODULE, | ||
111 | }, | ||
112 | .probe = sdhci_cns3xxx_probe, | ||
113 | .remove = __devexit_p(sdhci_cns3xxx_remove), | ||
114 | #ifdef CONFIG_PM | ||
115 | .suspend = sdhci_pltfm_suspend, | ||
116 | .resume = sdhci_pltfm_resume, | ||
117 | #endif | ||
118 | }; | ||
119 | |||
120 | static int __init sdhci_cns3xxx_init(void) | ||
121 | { | ||
122 | return platform_driver_register(&sdhci_cns3xxx_driver); | ||
123 | } | ||
124 | module_init(sdhci_cns3xxx_init); | ||
125 | |||
126 | static void __exit sdhci_cns3xxx_exit(void) | ||
127 | { | ||
128 | platform_driver_unregister(&sdhci_cns3xxx_driver); | ||
129 | } | ||
130 | module_exit(sdhci_cns3xxx_exit); | ||
131 | |||
132 | MODULE_DESCRIPTION("SDHCI driver for CNS3xxx"); | ||
133 | MODULE_AUTHOR("Scott Shu, " | ||
134 | "Anton Vorontsov <avorontsov@mvista.com>"); | ||
135 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c index 2aeef4ffed8c..f2d29dca4420 100644 --- a/drivers/mmc/host/sdhci-dove.c +++ b/drivers/mmc/host/sdhci-dove.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
24 | 24 | ||
25 | #include "sdhci.h" | ||
26 | #include "sdhci-pltfm.h" | 25 | #include "sdhci-pltfm.h" |
27 | 26 | ||
28 | static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) | 27 | static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) |
@@ -61,10 +60,50 @@ static struct sdhci_ops sdhci_dove_ops = { | |||
61 | .read_l = sdhci_dove_readl, | 60 | .read_l = sdhci_dove_readl, |
62 | }; | 61 | }; |
63 | 62 | ||
64 | struct sdhci_pltfm_data sdhci_dove_pdata = { | 63 | static struct sdhci_pltfm_data sdhci_dove_pdata = { |
65 | .ops = &sdhci_dove_ops, | 64 | .ops = &sdhci_dove_ops, |
66 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | | 65 | .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | |
67 | SDHCI_QUIRK_NO_BUSY_IRQ | | 66 | SDHCI_QUIRK_NO_BUSY_IRQ | |
68 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 67 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | |
69 | SDHCI_QUIRK_FORCE_DMA, | 68 | SDHCI_QUIRK_FORCE_DMA, |
70 | }; | 69 | }; |
70 | |||
71 | static int __devinit sdhci_dove_probe(struct platform_device *pdev) | ||
72 | { | ||
73 | return sdhci_pltfm_register(pdev, &sdhci_dove_pdata); | ||
74 | } | ||
75 | |||
76 | static int __devexit sdhci_dove_remove(struct platform_device *pdev) | ||
77 | { | ||
78 | return sdhci_pltfm_unregister(pdev); | ||
79 | } | ||
80 | |||
81 | static struct platform_driver sdhci_dove_driver = { | ||
82 | .driver = { | ||
83 | .name = "sdhci-dove", | ||
84 | .owner = THIS_MODULE, | ||
85 | }, | ||
86 | .probe = sdhci_dove_probe, | ||
87 | .remove = __devexit_p(sdhci_dove_remove), | ||
88 | #ifdef CONFIG_PM | ||
89 | .suspend = sdhci_pltfm_suspend, | ||
90 | .resume = sdhci_pltfm_resume, | ||
91 | #endif | ||
92 | }; | ||
93 | |||
94 | static int __init sdhci_dove_init(void) | ||
95 | { | ||
96 | return platform_driver_register(&sdhci_dove_driver); | ||
97 | } | ||
98 | module_init(sdhci_dove_init); | ||
99 | |||
100 | static void __exit sdhci_dove_exit(void) | ||
101 | { | ||
102 | platform_driver_unregister(&sdhci_dove_driver); | ||
103 | } | ||
104 | module_exit(sdhci_dove_exit); | ||
105 | |||
106 | MODULE_DESCRIPTION("SDHCI driver for Dove"); | ||
107 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, " | ||
108 | "Mike Rapoport <mike@compulab.co.il>"); | ||
109 | MODULE_LICENSE("GPL v2"); | ||
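With sdhci_pltfm_register() and sdhci_pltfm_unregister() available, each board-specific pltfm user (sdhci-cns3xxx and sdhci-dove above, sdhci-esdhc-imx below) becomes a self-contained platform driver instead of a table entry inside sdhci-pltfm.c. The shared shape, reduced to a skeleton with illustrative foo_* names and an empty quirks field standing in for the board-specific flags:

#include <linux/module.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

static struct sdhci_ops foo_sdhci_ops = {
	/* board-specific register accessors and clock hooks */
};

static struct sdhci_pltfm_data foo_sdhci_pdata = {
	.ops	= &foo_sdhci_ops,
	.quirks	= 0,			/* SDHCI_QUIRK_* flags for this board */
};

static int __devinit foo_sdhci_probe(struct platform_device *pdev)
{
	return sdhci_pltfm_register(pdev, &foo_sdhci_pdata);
}

static int __devexit foo_sdhci_remove(struct platform_device *pdev)
{
	return sdhci_pltfm_unregister(pdev);
}

static struct platform_driver foo_sdhci_driver = {
	.driver	= {
		.name	= "sdhci-foo",
		.owner	= THIS_MODULE,
	},
	.probe	= foo_sdhci_probe,
	.remove	= __devexit_p(foo_sdhci_remove),
};

Drivers that need extra setup around the core registration (as sdhci-esdhc-imx does) call sdhci_pltfm_init()/sdhci_add_host() and sdhci_remove_host()/sdhci_pltfm_free() directly instead of the one-shot helpers.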
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index a19967d0bfc4..710b706f4fcf 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -18,12 +18,10 @@ | |||
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/mmc/host.h> | 20 | #include <linux/mmc/host.h> |
21 | #include <linux/mmc/sdhci-pltfm.h> | ||
22 | #include <linux/mmc/mmc.h> | 21 | #include <linux/mmc/mmc.h> |
23 | #include <linux/mmc/sdio.h> | 22 | #include <linux/mmc/sdio.h> |
24 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
25 | #include <mach/esdhc.h> | 24 | #include <mach/esdhc.h> |
26 | #include "sdhci.h" | ||
27 | #include "sdhci-pltfm.h" | 25 | #include "sdhci-pltfm.h" |
28 | #include "sdhci-esdhc.h" | 26 | #include "sdhci-esdhc.h" |
29 | 27 | ||
@@ -31,7 +29,7 @@ | |||
31 | #define SDHCI_VENDOR_SPEC 0xC0 | 29 | #define SDHCI_VENDOR_SPEC 0xC0 |
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 30 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 |
33 | 31 | ||
34 | #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) | 32 | #define ESDHC_FLAG_GPIO_FOR_CD (1 << 0) |
35 | /* | 33 | /* |
36 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | 34 | * The CMDTYPE of the CMD register (offset 0xE) should be set to |
37 | * "11" when the STOP CMD12 is issued on imx53 to abort one | 35 | * "11" when the STOP CMD12 is issued on imx53 to abort one |
@@ -67,14 +65,14 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
67 | u32 val = readl(host->ioaddr + reg); | 65 | u32 val = readl(host->ioaddr + reg); |
68 | 66 | ||
69 | if (unlikely((reg == SDHCI_PRESENT_STATE) | 67 | if (unlikely((reg == SDHCI_PRESENT_STATE) |
70 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { | 68 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) { |
71 | struct esdhc_platform_data *boarddata = | 69 | struct esdhc_platform_data *boarddata = |
72 | host->mmc->parent->platform_data; | 70 | host->mmc->parent->platform_data; |
73 | 71 | ||
74 | if (boarddata && gpio_is_valid(boarddata->cd_gpio) | 72 | if (boarddata && gpio_is_valid(boarddata->cd_gpio) |
75 | && gpio_get_value(boarddata->cd_gpio)) | 73 | && gpio_get_value(boarddata->cd_gpio)) |
76 | /* no card, if a valid gpio says so... */ | 74 | /* no card, if a valid gpio says so... */ |
77 | val &= SDHCI_CARD_PRESENT; | 75 | val &= ~SDHCI_CARD_PRESENT; |
78 | else | 76 | else |
79 | /* ... in all other cases assume card is present */ | 77 | /* ... in all other cases assume card is present */ |
80 | val |= SDHCI_CARD_PRESENT; | 78 | val |= SDHCI_CARD_PRESENT; |
@@ -89,7 +87,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | |||
89 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 87 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
90 | 88 | ||
91 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | 89 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) |
92 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) | 90 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) |
93 | /* | 91 | /* |
94 | * these interrupts won't work with a custom card_detect gpio | 92 | * these interrupts won't work with a custom card_detect gpio |
95 | * (only applied to mx25/35) | 93 | * (only applied to mx25/35) |
@@ -191,16 +189,6 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) | |||
191 | return clk_get_rate(pltfm_host->clk) / 256 / 16; | 189 | return clk_get_rate(pltfm_host->clk) / 256 / 16; |
192 | } | 190 | } |
193 | 191 | ||
194 | static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | ||
195 | { | ||
196 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | ||
197 | |||
198 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | ||
199 | return gpio_get_value(boarddata->wp_gpio); | ||
200 | else | ||
201 | return -ENOSYS; | ||
202 | } | ||
203 | |||
204 | static struct sdhci_ops sdhci_esdhc_ops = { | 192 | static struct sdhci_ops sdhci_esdhc_ops = { |
205 | .read_l = esdhc_readl_le, | 193 | .read_l = esdhc_readl_le, |
206 | .read_w = esdhc_readw_le, | 194 | .read_w = esdhc_readw_le, |
@@ -212,6 +200,24 @@ static struct sdhci_ops sdhci_esdhc_ops = { | |||
212 | .get_min_clock = esdhc_pltfm_get_min_clock, | 200 | .get_min_clock = esdhc_pltfm_get_min_clock, |
213 | }; | 201 | }; |
214 | 202 | ||
203 | static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | ||
204 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA | ||
205 | | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | ||
206 | /* ADMA has issues. Might be fixable */ | ||
207 | .ops = &sdhci_esdhc_ops, | ||
208 | }; | ||
209 | |||
210 | static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | ||
211 | { | ||
212 | struct esdhc_platform_data *boarddata = | ||
213 | host->mmc->parent->platform_data; | ||
214 | |||
215 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | ||
216 | return gpio_get_value(boarddata->wp_gpio); | ||
217 | else | ||
218 | return -ENOSYS; | ||
219 | } | ||
220 | |||
215 | static irqreturn_t cd_irq(int irq, void *data) | 221 | static irqreturn_t cd_irq(int irq, void *data) |
216 | { | 222 | { |
217 | struct sdhci_host *sdhost = (struct sdhci_host *)data; | 223 | struct sdhci_host *sdhost = (struct sdhci_host *)data; |
@@ -220,30 +226,35 @@ static irqreturn_t cd_irq(int irq, void *data) | |||
220 | return IRQ_HANDLED; | 226 | return IRQ_HANDLED; |
221 | }; | 227 | }; |
222 | 228 | ||
223 | static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) | 229 | static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) |
224 | { | 230 | { |
225 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 231 | struct sdhci_pltfm_host *pltfm_host; |
226 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 232 | struct sdhci_host *host; |
233 | struct esdhc_platform_data *boarddata; | ||
227 | struct clk *clk; | 234 | struct clk *clk; |
228 | int err; | 235 | int err; |
229 | struct pltfm_imx_data *imx_data; | 236 | struct pltfm_imx_data *imx_data; |
230 | 237 | ||
238 | host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata); | ||
239 | if (IS_ERR(host)) | ||
240 | return PTR_ERR(host); | ||
241 | |||
242 | pltfm_host = sdhci_priv(host); | ||
243 | |||
244 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); | ||
245 | if (!imx_data) | ||
246 | return -ENOMEM; | ||
247 | pltfm_host->priv = imx_data; | ||
248 | |||
231 | clk = clk_get(mmc_dev(host->mmc), NULL); | 249 | clk = clk_get(mmc_dev(host->mmc), NULL); |
232 | if (IS_ERR(clk)) { | 250 | if (IS_ERR(clk)) { |
233 | dev_err(mmc_dev(host->mmc), "clk err\n"); | 251 | dev_err(mmc_dev(host->mmc), "clk err\n"); |
234 | return PTR_ERR(clk); | 252 | err = PTR_ERR(clk); |
253 | goto err_clk_get; | ||
235 | } | 254 | } |
236 | clk_enable(clk); | 255 | clk_enable(clk); |
237 | pltfm_host->clk = clk; | 256 | pltfm_host->clk = clk; |
238 | 257 | ||
239 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); | ||
240 | if (!imx_data) { | ||
241 | clk_disable(pltfm_host->clk); | ||
242 | clk_put(pltfm_host->clk); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | pltfm_host->priv = imx_data; | ||
246 | |||
247 | if (!cpu_is_mx25()) | 258 | if (!cpu_is_mx25()) |
248 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | 259 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; |
249 | 260 | ||
@@ -257,6 +268,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
257 | if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) | 268 | if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) |
258 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | 269 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; |
259 | 270 | ||
271 | boarddata = host->mmc->parent->platform_data; | ||
260 | if (boarddata) { | 272 | if (boarddata) { |
261 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); | 273 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); |
262 | if (err) { | 274 | if (err) { |
@@ -284,11 +296,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
284 | goto no_card_detect_irq; | 296 | goto no_card_detect_irq; |
285 | } | 297 | } |
286 | 298 | ||
287 | imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; | 299 | imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD; |
288 | /* Now we have a working card_detect again */ | 300 | /* Now we have a working card_detect again */ |
289 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 301 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
290 | } | 302 | } |
291 | 303 | ||
304 | err = sdhci_add_host(host); | ||
305 | if (err) | ||
306 | goto err_add_host; | ||
307 | |||
292 | return 0; | 308 | return 0; |
293 | 309 | ||
294 | no_card_detect_irq: | 310 | no_card_detect_irq: |
@@ -297,14 +313,23 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
297 | boarddata->cd_gpio = err; | 313 | boarddata->cd_gpio = err; |
298 | not_supported: | 314 | not_supported: |
299 | kfree(imx_data); | 315 | kfree(imx_data); |
300 | return 0; | 316 | err_add_host: |
317 | clk_disable(pltfm_host->clk); | ||
318 | clk_put(pltfm_host->clk); | ||
319 | err_clk_get: | ||
320 | sdhci_pltfm_free(pdev); | ||
321 | return err; | ||
301 | } | 322 | } |
302 | 323 | ||
303 | static void esdhc_pltfm_exit(struct sdhci_host *host) | 324 | static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev) |
304 | { | 325 | { |
326 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
305 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 327 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
306 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 328 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
307 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | 329 | struct pltfm_imx_data *imx_data = pltfm_host->priv; |
330 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | ||
331 | |||
332 | sdhci_remove_host(host, dead); | ||
308 | 333 | ||
309 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | 334 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) |
310 | gpio_free(boarddata->wp_gpio); | 335 | gpio_free(boarddata->wp_gpio); |
@@ -319,13 +344,37 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) | |||
319 | clk_disable(pltfm_host->clk); | 344 | clk_disable(pltfm_host->clk); |
320 | clk_put(pltfm_host->clk); | 345 | clk_put(pltfm_host->clk); |
321 | kfree(imx_data); | 346 | kfree(imx_data); |
347 | |||
348 | sdhci_pltfm_free(pdev); | ||
349 | |||
350 | return 0; | ||
322 | } | 351 | } |
323 | 352 | ||
324 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 353 | static struct platform_driver sdhci_esdhc_imx_driver = { |
325 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA | 354 | .driver = { |
326 | | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | 355 | .name = "sdhci-esdhc-imx", |
327 | /* ADMA has issues. Might be fixable */ | 356 | .owner = THIS_MODULE, |
328 | .ops = &sdhci_esdhc_ops, | 357 | }, |
329 | .init = esdhc_pltfm_init, | 358 | .probe = sdhci_esdhc_imx_probe, |
330 | .exit = esdhc_pltfm_exit, | 359 | .remove = __devexit_p(sdhci_esdhc_imx_remove), |
360 | #ifdef CONFIG_PM | ||
361 | .suspend = sdhci_pltfm_suspend, | ||
362 | .resume = sdhci_pltfm_resume, | ||
363 | #endif | ||
331 | }; | 364 | }; |
365 | |||
366 | static int __init sdhci_esdhc_imx_init(void) | ||
367 | { | ||
368 | return platform_driver_register(&sdhci_esdhc_imx_driver); | ||
369 | } | ||
370 | module_init(sdhci_esdhc_imx_init); | ||
371 | |||
372 | static void __exit sdhci_esdhc_imx_exit(void) | ||
373 | { | ||
374 | platform_driver_unregister(&sdhci_esdhc_imx_driver); | ||
375 | } | ||
376 | module_exit(sdhci_esdhc_imx_exit); | ||
377 | |||
378 | MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); | ||
379 | MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); | ||
380 | MODULE_LICENSE("GPL v2"); | ||
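Editor's note on the hunk above: with the esdhc-imx code turned into its own platform driver, the probe path now owns the whole error unwind, and each goto label undoes exactly what was acquired before it, finishing with sdhci_pltfm_free(). A minimal sketch of that ordering follows; my_probe, my_clk and my_pdata are illustrative names, not identifiers from the driver.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include "sdhci-pltfm.h"

	static struct sdhci_pltfm_data my_pdata;	/* placeholder quirks/ops */

	static int my_probe(struct platform_device *pdev)
	{
		struct sdhci_host *host;
		struct clk *my_clk;
		int err;

		host = sdhci_pltfm_init(pdev, &my_pdata);	/* 1: alloc + ioremap */
		if (IS_ERR(host))
			return PTR_ERR(host);

		my_clk = clk_get(&pdev->dev, NULL);		/* 2: clock */
		if (IS_ERR(my_clk)) {
			err = PTR_ERR(my_clk);
			goto err_clk_get;
		}
		clk_enable(my_clk);

		err = sdhci_add_host(host);			/* 3: register with MMC core */
		if (err)
			goto err_add_host;

		return 0;

	err_add_host:
		clk_disable(my_clk);				/* undo 2 */
		clk_put(my_clk);
	err_clk_get:
		sdhci_pltfm_free(pdev);				/* undo 1 last */
		return err;
	}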
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c deleted file mode 100644 index 60e4186a4345..000000000000 --- a/drivers/mmc/host/sdhci-of-core.c +++ /dev/null | |||
@@ -1,253 +0,0 @@ | |||
1 | /* | ||
2 | * OpenFirmware bindings for Secure Digital Host Controller Interface. | ||
3 | * | ||
4 | * Copyright (c) 2007 Freescale Semiconductor, Inc. | ||
5 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
6 | * | ||
7 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | ||
8 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
13 | * your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/err.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/of_platform.h> | ||
24 | #include <linux/of_address.h> | ||
25 | #include <linux/of_irq.h> | ||
26 | #include <linux/mmc/host.h> | ||
27 | #ifdef CONFIG_PPC | ||
28 | #include <asm/machdep.h> | ||
29 | #endif | ||
30 | #include "sdhci-of.h" | ||
31 | #include "sdhci.h" | ||
32 | |||
33 | #ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER | ||
34 | |||
35 | /* | ||
36 | * These accessors are designed for big endian hosts doing I/O to | ||
37 | * little endian controllers incorporating a 32-bit hardware byte swapper. | ||
38 | */ | ||
39 | |||
40 | u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) | ||
41 | { | ||
42 | return in_be32(host->ioaddr + reg); | ||
43 | } | ||
44 | |||
45 | u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) | ||
46 | { | ||
47 | return in_be16(host->ioaddr + (reg ^ 0x2)); | ||
48 | } | ||
49 | |||
50 | u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) | ||
51 | { | ||
52 | return in_8(host->ioaddr + (reg ^ 0x3)); | ||
53 | } | ||
54 | |||
55 | void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg) | ||
56 | { | ||
57 | out_be32(host->ioaddr + reg, val); | ||
58 | } | ||
59 | |||
60 | void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg) | ||
61 | { | ||
62 | struct sdhci_of_host *of_host = sdhci_priv(host); | ||
63 | int base = reg & ~0x3; | ||
64 | int shift = (reg & 0x2) * 8; | ||
65 | |||
66 | switch (reg) { | ||
67 | case SDHCI_TRANSFER_MODE: | ||
68 | /* | ||
69 | * Postpone this write, we must do it together with a | ||
70 | * command write that is down below. | ||
71 | */ | ||
72 | of_host->xfer_mode_shadow = val; | ||
73 | return; | ||
74 | case SDHCI_COMMAND: | ||
75 | sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow, | ||
76 | SDHCI_TRANSFER_MODE); | ||
77 | return; | ||
78 | } | ||
79 | clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); | ||
80 | } | ||
81 | |||
82 | void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) | ||
83 | { | ||
84 | int base = reg & ~0x3; | ||
85 | int shift = (reg & 0x3) * 8; | ||
86 | |||
87 | clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); | ||
88 | } | ||
89 | #endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ | ||
90 | |||
91 | #ifdef CONFIG_PM | ||
92 | |||
93 | static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state) | ||
94 | { | ||
95 | struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); | ||
96 | |||
97 | return mmc_suspend_host(host->mmc); | ||
98 | } | ||
99 | |||
100 | static int sdhci_of_resume(struct platform_device *ofdev) | ||
101 | { | ||
102 | struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); | ||
103 | |||
104 | return mmc_resume_host(host->mmc); | ||
105 | } | ||
106 | |||
107 | #else | ||
108 | |||
109 | #define sdhci_of_suspend NULL | ||
110 | #define sdhci_of_resume NULL | ||
111 | |||
112 | #endif | ||
113 | |||
114 | static bool __devinit sdhci_of_wp_inverted(struct device_node *np) | ||
115 | { | ||
116 | if (of_get_property(np, "sdhci,wp-inverted", NULL)) | ||
117 | return true; | ||
118 | |||
119 | /* Old device trees don't have the wp-inverted property. */ | ||
120 | #ifdef CONFIG_PPC | ||
121 | return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); | ||
122 | #else | ||
123 | return false; | ||
124 | #endif | ||
125 | } | ||
126 | |||
127 | static const struct of_device_id sdhci_of_match[]; | ||
128 | static int __devinit sdhci_of_probe(struct platform_device *ofdev) | ||
129 | { | ||
130 | const struct of_device_id *match; | ||
131 | struct device_node *np = ofdev->dev.of_node; | ||
132 | struct sdhci_of_data *sdhci_of_data; | ||
133 | struct sdhci_host *host; | ||
134 | struct sdhci_of_host *of_host; | ||
135 | const __be32 *clk; | ||
136 | int size; | ||
137 | int ret; | ||
138 | |||
139 | match = of_match_device(sdhci_of_match, &ofdev->dev); | ||
140 | if (!match) | ||
141 | return -EINVAL; | ||
142 | sdhci_of_data = match->data; | ||
143 | |||
144 | if (!of_device_is_available(np)) | ||
145 | return -ENODEV; | ||
146 | |||
147 | host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); | ||
148 | if (IS_ERR(host)) | ||
149 | return -ENOMEM; | ||
150 | |||
151 | of_host = sdhci_priv(host); | ||
152 | dev_set_drvdata(&ofdev->dev, host); | ||
153 | |||
154 | host->ioaddr = of_iomap(np, 0); | ||
155 | if (!host->ioaddr) { | ||
156 | ret = -ENOMEM; | ||
157 | goto err_addr_map; | ||
158 | } | ||
159 | |||
160 | host->irq = irq_of_parse_and_map(np, 0); | ||
161 | if (!host->irq) { | ||
162 | ret = -EINVAL; | ||
163 | goto err_no_irq; | ||
164 | } | ||
165 | |||
166 | host->hw_name = dev_name(&ofdev->dev); | ||
167 | if (sdhci_of_data) { | ||
168 | host->quirks = sdhci_of_data->quirks; | ||
169 | host->ops = &sdhci_of_data->ops; | ||
170 | } | ||
171 | |||
172 | if (of_get_property(np, "sdhci,auto-cmd12", NULL)) | ||
173 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; | ||
174 | |||
175 | |||
176 | if (of_get_property(np, "sdhci,1-bit-only", NULL)) | ||
177 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; | ||
178 | |||
179 | if (sdhci_of_wp_inverted(np)) | ||
180 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; | ||
181 | |||
182 | clk = of_get_property(np, "clock-frequency", &size); | ||
183 | if (clk && size == sizeof(*clk) && *clk) | ||
184 | of_host->clock = be32_to_cpup(clk); | ||
185 | |||
186 | ret = sdhci_add_host(host); | ||
187 | if (ret) | ||
188 | goto err_add_host; | ||
189 | |||
190 | return 0; | ||
191 | |||
192 | err_add_host: | ||
193 | irq_dispose_mapping(host->irq); | ||
194 | err_no_irq: | ||
195 | iounmap(host->ioaddr); | ||
196 | err_addr_map: | ||
197 | sdhci_free_host(host); | ||
198 | return ret; | ||
199 | } | ||
200 | |||
201 | static int __devexit sdhci_of_remove(struct platform_device *ofdev) | ||
202 | { | ||
203 | struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); | ||
204 | |||
205 | sdhci_remove_host(host, 0); | ||
206 | sdhci_free_host(host); | ||
207 | irq_dispose_mapping(host->irq); | ||
208 | iounmap(host->ioaddr); | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static const struct of_device_id sdhci_of_match[] = { | ||
213 | #ifdef CONFIG_MMC_SDHCI_OF_ESDHC | ||
214 | { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, | ||
215 | { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, | ||
216 | { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, | ||
217 | #endif | ||
218 | #ifdef CONFIG_MMC_SDHCI_OF_HLWD | ||
219 | { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, }, | ||
220 | #endif | ||
221 | { .compatible = "generic-sdhci", }, | ||
222 | {}, | ||
223 | }; | ||
224 | MODULE_DEVICE_TABLE(of, sdhci_of_match); | ||
225 | |||
226 | static struct platform_driver sdhci_of_driver = { | ||
227 | .driver = { | ||
228 | .name = "sdhci-of", | ||
229 | .owner = THIS_MODULE, | ||
230 | .of_match_table = sdhci_of_match, | ||
231 | }, | ||
232 | .probe = sdhci_of_probe, | ||
233 | .remove = __devexit_p(sdhci_of_remove), | ||
234 | .suspend = sdhci_of_suspend, | ||
235 | .resume = sdhci_of_resume, | ||
236 | }; | ||
237 | |||
238 | static int __init sdhci_of_init(void) | ||
239 | { | ||
240 | return platform_driver_register(&sdhci_of_driver); | ||
241 | } | ||
242 | module_init(sdhci_of_init); | ||
243 | |||
244 | static void __exit sdhci_of_exit(void) | ||
245 | { | ||
246 | platform_driver_unregister(&sdhci_of_driver); | ||
247 | } | ||
248 | module_exit(sdhci_of_exit); | ||
249 | |||
250 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface OF driver"); | ||
251 | MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " | ||
252 | "Anton Vorontsov <avorontsov@ru.mvista.com>"); | ||
253 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index ba40d6d035c7..fe604df65011 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -16,8 +16,7 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/mmc/host.h> | 18 | #include <linux/mmc/host.h> |
19 | #include "sdhci-of.h" | 19 | #include "sdhci-pltfm.h" |
20 | #include "sdhci.h" | ||
21 | #include "sdhci-esdhc.h" | 20 | #include "sdhci-esdhc.h" |
22 | 21 | ||
23 | static u16 esdhc_readw(struct sdhci_host *host, int reg) | 22 | static u16 esdhc_readw(struct sdhci_host *host, int reg) |
@@ -60,32 +59,83 @@ static int esdhc_of_enable_dma(struct sdhci_host *host) | |||
60 | 59 | ||
61 | static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) | 60 | static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) |
62 | { | 61 | { |
63 | struct sdhci_of_host *of_host = sdhci_priv(host); | 62 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
64 | 63 | ||
65 | return of_host->clock; | 64 | return pltfm_host->clock; |
66 | } | 65 | } |
67 | 66 | ||
68 | static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) | 67 | static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) |
69 | { | 68 | { |
70 | struct sdhci_of_host *of_host = sdhci_priv(host); | 69 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
71 | 70 | ||
72 | return of_host->clock / 256 / 16; | 71 | return pltfm_host->clock / 256 / 16; |
73 | } | 72 | } |
74 | 73 | ||
75 | struct sdhci_of_data sdhci_esdhc = { | 74 | static struct sdhci_ops sdhci_esdhc_ops = { |
75 | .read_l = sdhci_be32bs_readl, | ||
76 | .read_w = esdhc_readw, | ||
77 | .read_b = sdhci_be32bs_readb, | ||
78 | .write_l = sdhci_be32bs_writel, | ||
79 | .write_w = esdhc_writew, | ||
80 | .write_b = esdhc_writeb, | ||
81 | .set_clock = esdhc_set_clock, | ||
82 | .enable_dma = esdhc_of_enable_dma, | ||
83 | .get_max_clock = esdhc_of_get_max_clock, | ||
84 | .get_min_clock = esdhc_of_get_min_clock, | ||
85 | }; | ||
86 | |||
87 | static struct sdhci_pltfm_data sdhci_esdhc_pdata = { | ||
76 | /* card detection could be handled via GPIO */ | 88 | /* card detection could be handled via GPIO */ |
77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION | 89 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
78 | | SDHCI_QUIRK_NO_CARD_NO_RESET, | 90 | | SDHCI_QUIRK_NO_CARD_NO_RESET, |
79 | .ops = { | 91 | .ops = &sdhci_esdhc_ops, |
80 | .read_l = sdhci_be32bs_readl, | 92 | }; |
81 | .read_w = esdhc_readw, | 93 | |
82 | .read_b = sdhci_be32bs_readb, | 94 | static int __devinit sdhci_esdhc_probe(struct platform_device *pdev) |
83 | .write_l = sdhci_be32bs_writel, | 95 | { |
84 | .write_w = esdhc_writew, | 96 | return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata); |
85 | .write_b = esdhc_writeb, | 97 | } |
86 | .set_clock = esdhc_set_clock, | 98 | |
87 | .enable_dma = esdhc_of_enable_dma, | 99 | static int __devexit sdhci_esdhc_remove(struct platform_device *pdev) |
88 | .get_max_clock = esdhc_of_get_max_clock, | 100 | { |
89 | .get_min_clock = esdhc_of_get_min_clock, | 101 | return sdhci_pltfm_unregister(pdev); |
102 | } | ||
103 | |||
104 | static const struct of_device_id sdhci_esdhc_of_match[] = { | ||
105 | { .compatible = "fsl,mpc8379-esdhc" }, | ||
106 | { .compatible = "fsl,mpc8536-esdhc" }, | ||
107 | { .compatible = "fsl,esdhc" }, | ||
108 | { } | ||
109 | }; | ||
110 | MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match); | ||
111 | |||
112 | static struct platform_driver sdhci_esdhc_driver = { | ||
113 | .driver = { | ||
114 | .name = "sdhci-esdhc", | ||
115 | .owner = THIS_MODULE, | ||
116 | .of_match_table = sdhci_esdhc_of_match, | ||
90 | }, | 117 | }, |
118 | .probe = sdhci_esdhc_probe, | ||
119 | .remove = __devexit_p(sdhci_esdhc_remove), | ||
120 | #ifdef CONFIG_PM | ||
121 | .suspend = sdhci_pltfm_suspend, | ||
122 | .resume = sdhci_pltfm_resume, | ||
123 | #endif | ||
91 | }; | 124 | }; |
125 | |||
126 | static int __init sdhci_esdhc_init(void) | ||
127 | { | ||
128 | return platform_driver_register(&sdhci_esdhc_driver); | ||
129 | } | ||
130 | module_init(sdhci_esdhc_init); | ||
131 | |||
132 | static void __exit sdhci_esdhc_exit(void) | ||
133 | { | ||
134 | platform_driver_unregister(&sdhci_esdhc_driver); | ||
135 | } | ||
136 | module_exit(sdhci_esdhc_exit); | ||
137 | |||
138 | MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); | ||
139 | MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " | ||
140 | "Anton Vorontsov <avorontsov@ru.mvista.com>"); | ||
141 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 68ddb7546ae2..735be131dca9 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c | |||
@@ -21,8 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
24 | #include "sdhci-of.h" | 24 | #include "sdhci-pltfm.h" |
25 | #include "sdhci.h" | ||
26 | 25 | ||
27 | /* | 26 | /* |
28 | * Ops and quirks for the Nintendo Wii SDHCI controllers. | 27 | * Ops and quirks for the Nintendo Wii SDHCI controllers. |
@@ -51,15 +50,63 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg) | |||
51 | udelay(SDHCI_HLWD_WRITE_DELAY); | 50 | udelay(SDHCI_HLWD_WRITE_DELAY); |
52 | } | 51 | } |
53 | 52 | ||
54 | struct sdhci_of_data sdhci_hlwd = { | 53 | static struct sdhci_ops sdhci_hlwd_ops = { |
54 | .read_l = sdhci_be32bs_readl, | ||
55 | .read_w = sdhci_be32bs_readw, | ||
56 | .read_b = sdhci_be32bs_readb, | ||
57 | .write_l = sdhci_hlwd_writel, | ||
58 | .write_w = sdhci_hlwd_writew, | ||
59 | .write_b = sdhci_hlwd_writeb, | ||
60 | }; | ||
61 | |||
62 | static struct sdhci_pltfm_data sdhci_hlwd_pdata = { | ||
55 | .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | | 63 | .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | |
56 | SDHCI_QUIRK_32BIT_DMA_SIZE, | 64 | SDHCI_QUIRK_32BIT_DMA_SIZE, |
57 | .ops = { | 65 | .ops = &sdhci_hlwd_ops, |
58 | .read_l = sdhci_be32bs_readl, | 66 | }; |
59 | .read_w = sdhci_be32bs_readw, | 67 | |
60 | .read_b = sdhci_be32bs_readb, | 68 | static int __devinit sdhci_hlwd_probe(struct platform_device *pdev) |
61 | .write_l = sdhci_hlwd_writel, | 69 | { |
62 | .write_w = sdhci_hlwd_writew, | 70 | return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata); |
63 | .write_b = sdhci_hlwd_writeb, | 71 | } |
72 | |||
73 | static int __devexit sdhci_hlwd_remove(struct platform_device *pdev) | ||
74 | { | ||
75 | return sdhci_pltfm_unregister(pdev); | ||
76 | } | ||
77 | |||
78 | static const struct of_device_id sdhci_hlwd_of_match[] = { | ||
79 | { .compatible = "nintendo,hollywood-sdhci" }, | ||
80 | { } | ||
81 | }; | ||
82 | MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match); | ||
83 | |||
84 | static struct platform_driver sdhci_hlwd_driver = { | ||
85 | .driver = { | ||
86 | .name = "sdhci-hlwd", | ||
87 | .owner = THIS_MODULE, | ||
88 | .of_match_table = sdhci_hlwd_of_match, | ||
64 | }, | 89 | }, |
90 | .probe = sdhci_hlwd_probe, | ||
91 | .remove = __devexit_p(sdhci_hlwd_remove), | ||
92 | #ifdef CONFIG_PM | ||
93 | .suspend = sdhci_pltfm_suspend, | ||
94 | .resume = sdhci_pltfm_resume, | ||
95 | #endif | ||
65 | }; | 96 | }; |
97 | |||
98 | static int __init sdhci_hlwd_init(void) | ||
99 | { | ||
100 | return platform_driver_register(&sdhci_hlwd_driver); | ||
101 | } | ||
102 | module_init(sdhci_hlwd_init); | ||
103 | |||
104 | static void __exit sdhci_hlwd_exit(void) | ||
105 | { | ||
106 | platform_driver_unregister(&sdhci_hlwd_driver); | ||
107 | } | ||
108 | module_exit(sdhci_hlwd_exit); | ||
109 | |||
110 | MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); | ||
111 | MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz"); | ||
112 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h deleted file mode 100644 index ad09ad9915d8..000000000000 --- a/drivers/mmc/host/sdhci-of.h +++ /dev/null | |||
@@ -1,42 +0,0 @@ | |||
1 | /* | ||
2 | * OpenFirmware bindings for Secure Digital Host Controller Interface. | ||
3 | * | ||
4 | * Copyright (c) 2007 Freescale Semiconductor, Inc. | ||
5 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
6 | * | ||
7 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | ||
8 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
13 | * your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef __SDHCI_OF_H | ||
17 | #define __SDHCI_OF_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include "sdhci.h" | ||
21 | |||
22 | struct sdhci_of_data { | ||
23 | unsigned int quirks; | ||
24 | struct sdhci_ops ops; | ||
25 | }; | ||
26 | |||
27 | struct sdhci_of_host { | ||
28 | unsigned int clock; | ||
29 | u16 xfer_mode_shadow; | ||
30 | }; | ||
31 | |||
32 | extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg); | ||
33 | extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg); | ||
34 | extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg); | ||
35 | extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg); | ||
36 | extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg); | ||
37 | extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg); | ||
38 | |||
39 | extern struct sdhci_of_data sdhci_esdhc; | ||
40 | extern struct sdhci_of_data sdhci_hlwd; | ||
41 | |||
42 | #endif /* __SDHCI_OF_H */ | ||
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 936bbca19c0a..26c528648f3c 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -143,6 +143,12 @@ static const struct sdhci_pci_fixes sdhci_cafe = { | |||
143 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, | 143 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) | ||
147 | { | ||
148 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
149 | return 0; | ||
150 | } | ||
151 | |||
146 | /* | 152 | /* |
147 | * ADMA operation is disabled for Moorestown platform due to | 153 | * ADMA operation is disabled for Moorestown platform due to |
148 | * hardware bugs. | 154 | * hardware bugs. |
@@ -157,8 +163,15 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip) | |||
157 | return 0; | 163 | return 0; |
158 | } | 164 | } |
159 | 165 | ||
166 | static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) | ||
167 | { | ||
168 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
169 | return 0; | ||
170 | } | ||
171 | |||
160 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { | 172 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { |
161 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, | 173 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, |
174 | .probe_slot = mrst_hc_probe_slot, | ||
162 | }; | 175 | }; |
163 | 176 | ||
164 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { | 177 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { |
@@ -170,8 +183,13 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { | |||
170 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 183 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
171 | }; | 184 | }; |
172 | 185 | ||
173 | static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = { | 186 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { |
187 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | ||
188 | }; | ||
189 | |||
190 | static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { | ||
174 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 191 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
192 | .probe_slot = mfd_emmc_probe_slot, | ||
175 | }; | 193 | }; |
176 | 194 | ||
177 | /* O2Micro extra registers */ | 195 | /* O2Micro extra registers */ |
@@ -682,7 +700,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
682 | .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1, | 700 | .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1, |
683 | .subvendor = PCI_ANY_ID, | 701 | .subvendor = PCI_ANY_ID, |
684 | .subdevice = PCI_ANY_ID, | 702 | .subdevice = PCI_ANY_ID, |
685 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, | 703 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, |
686 | }, | 704 | }, |
687 | 705 | ||
688 | { | 706 | { |
@@ -690,7 +708,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
690 | .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2, | 708 | .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2, |
691 | .subvendor = PCI_ANY_ID, | 709 | .subvendor = PCI_ANY_ID, |
692 | .subdevice = PCI_ANY_ID, | 710 | .subdevice = PCI_ANY_ID, |
693 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, | 711 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, |
694 | }, | 712 | }, |
695 | 713 | ||
696 | { | 714 | { |
@@ -698,7 +716,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
698 | .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0, | 716 | .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0, |
699 | .subvendor = PCI_ANY_ID, | 717 | .subvendor = PCI_ANY_ID, |
700 | .subdevice = PCI_ANY_ID, | 718 | .subdevice = PCI_ANY_ID, |
701 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, | 719 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, |
702 | }, | 720 | }, |
703 | 721 | ||
704 | { | 722 | { |
@@ -706,7 +724,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
706 | .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1, | 724 | .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1, |
707 | .subvendor = PCI_ANY_ID, | 725 | .subvendor = PCI_ANY_ID, |
708 | .subdevice = PCI_ANY_ID, | 726 | .subdevice = PCI_ANY_ID, |
709 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, | 727 | .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, |
710 | }, | 728 | }, |
711 | 729 | ||
712 | { | 730 | { |
@@ -789,8 +807,34 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) | |||
789 | return 0; | 807 | return 0; |
790 | } | 808 | } |
791 | 809 | ||
810 | static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) | ||
811 | { | ||
812 | u8 ctrl; | ||
813 | |||
814 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | ||
815 | |||
816 | switch (width) { | ||
817 | case MMC_BUS_WIDTH_8: | ||
818 | ctrl |= SDHCI_CTRL_8BITBUS; | ||
819 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
820 | break; | ||
821 | case MMC_BUS_WIDTH_4: | ||
822 | ctrl |= SDHCI_CTRL_4BITBUS; | ||
823 | ctrl &= ~SDHCI_CTRL_8BITBUS; | ||
824 | break; | ||
825 | default: | ||
826 | ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); | ||
827 | break; | ||
828 | } | ||
829 | |||
830 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | ||
831 | |||
832 | return 0; | ||
833 | } | ||
834 | |||
792 | static struct sdhci_ops sdhci_pci_ops = { | 835 | static struct sdhci_ops sdhci_pci_ops = { |
793 | .enable_dma = sdhci_pci_enable_dma, | 836 | .enable_dma = sdhci_pci_enable_dma, |
837 | .platform_8bit_width = sdhci_pci_8bit_width, | ||
794 | }; | 838 | }; |
795 | 839 | ||
796 | /*****************************************************************************\ | 840 | /*****************************************************************************\ |
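Editor's note on the sdhci-pci hunks above: the driver gains a platform_8bit_width host op plus per-slot probe fixups that advertise MMC_CAP_8_BIT_DATA for the Moorestown and Medfield eMMC slots. Purely as an illustration of how those pieces connect (the exact call site inside sdhci.c is assumed here, not quoted), a simplified sketch:

	/* Hypothetical fixes entry wiring an 8-bit capable eMMC slot. */
	static const struct sdhci_pci_fixes example_fixes = {
		.quirks     = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
		.probe_slot = mfd_emmc_probe_slot,	/* sets MMC_CAP_8_BIT_DATA */
	};

	/*
	 * When the MMC core later selects a bus width, a controller that
	 * provides the hook gets to program SDHCI_HOST_CONTROL itself
	 * (simplified assumption about the core's dispatch).
	 */
	static void example_width_request(struct sdhci_host *host, int width)
	{
		if (host->ops->platform_8bit_width)
			host->ops->platform_8bit_width(host, width); /* -> sdhci_pci_8bit_width() */
	}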
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index dbab0407f4b6..71c0ce1f6db0 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c | |||
@@ -2,6 +2,12 @@ | |||
2 | * sdhci-pltfm.c Support for SDHCI platform devices | 2 | * sdhci-pltfm.c Support for SDHCI platform devices |
3 | * Copyright (c) 2009 Intel Corporation | 3 | * Copyright (c) 2009 Intel Corporation |
4 | * | 4 | * |
5 | * Copyright (c) 2007 Freescale Semiconductor, Inc. | ||
6 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
7 | * | ||
8 | * Authors: Xiaobo Xie <X.Xie@freescale.com> | ||
9 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
10 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
@@ -22,48 +28,66 @@ | |||
22 | * Inspired by sdhci-pci.c, by Pierre Ossman | 28 | * Inspired by sdhci-pci.c, by Pierre Ossman |
23 | */ | 29 | */ |
24 | 30 | ||
25 | #include <linux/delay.h> | 31 | #include <linux/err.h> |
26 | #include <linux/highmem.h> | 32 | #include <linux/of.h> |
27 | #include <linux/mod_devicetable.h> | 33 | #ifdef CONFIG_PPC |
28 | #include <linux/platform_device.h> | 34 | #include <asm/machdep.h> |
35 | #endif | ||
36 | #include "sdhci-pltfm.h" | ||
29 | 37 | ||
30 | #include <linux/mmc/host.h> | 38 | static struct sdhci_ops sdhci_pltfm_ops = { |
39 | }; | ||
31 | 40 | ||
32 | #include <linux/io.h> | 41 | #ifdef CONFIG_OF |
33 | #include <linux/mmc/sdhci-pltfm.h> | 42 | static bool sdhci_of_wp_inverted(struct device_node *np) |
43 | { | ||
44 | if (of_get_property(np, "sdhci,wp-inverted", NULL)) | ||
45 | return true; | ||
34 | 46 | ||
35 | #include "sdhci.h" | 47 | /* Old device trees don't have the wp-inverted property. */ |
36 | #include "sdhci-pltfm.h" | 48 | #ifdef CONFIG_PPC |
49 | return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); | ||
50 | #else | ||
51 | return false; | ||
52 | #endif /* CONFIG_PPC */ | ||
53 | } | ||
54 | |||
55 | void sdhci_get_of_property(struct platform_device *pdev) | ||
56 | { | ||
57 | struct device_node *np = pdev->dev.of_node; | ||
58 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
59 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
60 | const __be32 *clk; | ||
61 | int size; | ||
37 | 62 | ||
38 | /*****************************************************************************\ | 63 | if (of_device_is_available(np)) { |
39 | * * | 64 | if (of_get_property(np, "sdhci,auto-cmd12", NULL)) |
40 | * SDHCI core callbacks * | 65 | host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; |
41 | * * | ||
42 | \*****************************************************************************/ | ||
43 | 66 | ||
44 | static struct sdhci_ops sdhci_pltfm_ops = { | 67 | if (of_get_property(np, "sdhci,1-bit-only", NULL)) |
45 | }; | 68 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; |
46 | 69 | ||
47 | /*****************************************************************************\ | 70 | if (sdhci_of_wp_inverted(np)) |
48 | * * | 71 | host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; |
49 | * Device probing/removal * | ||
50 | * * | ||
51 | \*****************************************************************************/ | ||
52 | 72 | ||
53 | static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) | 73 | clk = of_get_property(np, "clock-frequency", &size); |
74 | if (clk && size == sizeof(*clk) && *clk) | ||
75 | pltfm_host->clock = be32_to_cpup(clk); | ||
76 | } | ||
77 | } | ||
78 | #else | ||
79 | void sdhci_get_of_property(struct platform_device *pdev) {} | ||
80 | #endif /* CONFIG_OF */ | ||
81 | EXPORT_SYMBOL_GPL(sdhci_get_of_property); | ||
82 | |||
83 | struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, | ||
84 | struct sdhci_pltfm_data *pdata) | ||
54 | { | 85 | { |
55 | const struct platform_device_id *platid = platform_get_device_id(pdev); | ||
56 | struct sdhci_pltfm_data *pdata; | ||
57 | struct sdhci_host *host; | 86 | struct sdhci_host *host; |
58 | struct sdhci_pltfm_host *pltfm_host; | 87 | struct sdhci_pltfm_host *pltfm_host; |
59 | struct resource *iomem; | 88 | struct resource *iomem; |
60 | int ret; | 89 | int ret; |
61 | 90 | ||
62 | if (platid && platid->driver_data) | ||
63 | pdata = (void *)platid->driver_data; | ||
64 | else | ||
65 | pdata = pdev->dev.platform_data; | ||
66 | |||
67 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 91 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
68 | if (!iomem) { | 92 | if (!iomem) { |
69 | ret = -ENOMEM; | 93 | ret = -ENOMEM; |
@@ -71,8 +95,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) | |||
71 | } | 95 | } |
72 | 96 | ||
73 | if (resource_size(iomem) < 0x100) | 97 | if (resource_size(iomem) < 0x100) |
74 | dev_err(&pdev->dev, "Invalid iomem size. You may " | 98 | dev_err(&pdev->dev, "Invalid iomem size!\n"); |
75 | "experience problems.\n"); | ||
76 | 99 | ||
77 | /* Some PCI-based MFD need the parent here */ | 100 | /* Some PCI-based MFD need the parent here */ |
78 | if (pdev->dev.parent != &platform_bus) | 101 | if (pdev->dev.parent != &platform_bus) |
@@ -87,7 +110,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) | |||
87 | 110 | ||
88 | pltfm_host = sdhci_priv(host); | 111 | pltfm_host = sdhci_priv(host); |
89 | 112 | ||
90 | host->hw_name = "platform"; | 113 | host->hw_name = dev_name(&pdev->dev); |
91 | if (pdata && pdata->ops) | 114 | if (pdata && pdata->ops) |
92 | host->ops = pdata->ops; | 115 | host->ops = pdata->ops; |
93 | else | 116 | else |
@@ -110,126 +133,95 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) | |||
110 | goto err_remap; | 133 | goto err_remap; |
111 | } | 134 | } |
112 | 135 | ||
113 | if (pdata && pdata->init) { | ||
114 | ret = pdata->init(host, pdata); | ||
115 | if (ret) | ||
116 | goto err_plat_init; | ||
117 | } | ||
118 | |||
119 | ret = sdhci_add_host(host); | ||
120 | if (ret) | ||
121 | goto err_add_host; | ||
122 | |||
123 | platform_set_drvdata(pdev, host); | 136 | platform_set_drvdata(pdev, host); |
124 | 137 | ||
125 | return 0; | 138 | return host; |
126 | 139 | ||
127 | err_add_host: | ||
128 | if (pdata && pdata->exit) | ||
129 | pdata->exit(host); | ||
130 | err_plat_init: | ||
131 | iounmap(host->ioaddr); | ||
132 | err_remap: | 140 | err_remap: |
133 | release_mem_region(iomem->start, resource_size(iomem)); | 141 | release_mem_region(iomem->start, resource_size(iomem)); |
134 | err_request: | 142 | err_request: |
135 | sdhci_free_host(host); | 143 | sdhci_free_host(host); |
136 | err: | 144 | err: |
137 | printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret); | 145 | dev_err(&pdev->dev, "%s failed %d\n", __func__, ret); |
138 | return ret; | 146 | return ERR_PTR(ret); |
139 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(sdhci_pltfm_init); | ||
140 | 149 | ||
141 | static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) | 150 | void sdhci_pltfm_free(struct platform_device *pdev) |
142 | { | 151 | { |
143 | struct sdhci_pltfm_data *pdata = pdev->dev.platform_data; | ||
144 | struct sdhci_host *host = platform_get_drvdata(pdev); | 152 | struct sdhci_host *host = platform_get_drvdata(pdev); |
145 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 153 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
146 | int dead; | ||
147 | u32 scratch; | ||
148 | |||
149 | dead = 0; | ||
150 | scratch = readl(host->ioaddr + SDHCI_INT_STATUS); | ||
151 | if (scratch == (u32)-1) | ||
152 | dead = 1; | ||
153 | 154 | ||
154 | sdhci_remove_host(host, dead); | ||
155 | if (pdata && pdata->exit) | ||
156 | pdata->exit(host); | ||
157 | iounmap(host->ioaddr); | 155 | iounmap(host->ioaddr); |
158 | release_mem_region(iomem->start, resource_size(iomem)); | 156 | release_mem_region(iomem->start, resource_size(iomem)); |
159 | sdhci_free_host(host); | 157 | sdhci_free_host(host); |
160 | platform_set_drvdata(pdev, NULL); | 158 | platform_set_drvdata(pdev, NULL); |
159 | } | ||
160 | EXPORT_SYMBOL_GPL(sdhci_pltfm_free); | ||
161 | 161 | ||
162 | return 0; | 162 | int sdhci_pltfm_register(struct platform_device *pdev, |
163 | struct sdhci_pltfm_data *pdata) | ||
164 | { | ||
165 | struct sdhci_host *host; | ||
166 | int ret = 0; | ||
167 | |||
168 | host = sdhci_pltfm_init(pdev, pdata); | ||
169 | if (IS_ERR(host)) | ||
170 | return PTR_ERR(host); | ||
171 | |||
172 | sdhci_get_of_property(pdev); | ||
173 | |||
174 | ret = sdhci_add_host(host); | ||
175 | if (ret) | ||
176 | sdhci_pltfm_free(pdev); | ||
177 | |||
178 | return ret; | ||
163 | } | 179 | } |
180 | EXPORT_SYMBOL_GPL(sdhci_pltfm_register); | ||
164 | 181 | ||
165 | static const struct platform_device_id sdhci_pltfm_ids[] = { | 182 | int sdhci_pltfm_unregister(struct platform_device *pdev) |
166 | { "sdhci", }, | 183 | { |
167 | #ifdef CONFIG_MMC_SDHCI_CNS3XXX | 184 | struct sdhci_host *host = platform_get_drvdata(pdev); |
168 | { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata }, | 185 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); |
169 | #endif | 186 | |
170 | #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX | 187 | sdhci_remove_host(host, dead); |
171 | { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata }, | 188 | sdhci_pltfm_free(pdev); |
172 | #endif | 189 | |
173 | #ifdef CONFIG_MMC_SDHCI_DOVE | 190 | return 0; |
174 | { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata }, | 191 | } |
175 | #endif | 192 | EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); |
176 | #ifdef CONFIG_MMC_SDHCI_TEGRA | ||
177 | { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata }, | ||
178 | #endif | ||
179 | { }, | ||
180 | }; | ||
181 | MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids); | ||
182 | 193 | ||
183 | #ifdef CONFIG_PM | 194 | #ifdef CONFIG_PM |
184 | static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state) | 195 | int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state) |
185 | { | 196 | { |
186 | struct sdhci_host *host = platform_get_drvdata(dev); | 197 | struct sdhci_host *host = platform_get_drvdata(dev); |
187 | 198 | ||
188 | return sdhci_suspend_host(host, state); | 199 | return sdhci_suspend_host(host, state); |
189 | } | 200 | } |
201 | EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); | ||
190 | 202 | ||
191 | static int sdhci_pltfm_resume(struct platform_device *dev) | 203 | int sdhci_pltfm_resume(struct platform_device *dev) |
192 | { | 204 | { |
193 | struct sdhci_host *host = platform_get_drvdata(dev); | 205 | struct sdhci_host *host = platform_get_drvdata(dev); |
194 | 206 | ||
195 | return sdhci_resume_host(host); | 207 | return sdhci_resume_host(host); |
196 | } | 208 | } |
197 | #else | 209 | EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); |
198 | #define sdhci_pltfm_suspend NULL | ||
199 | #define sdhci_pltfm_resume NULL | ||
200 | #endif /* CONFIG_PM */ | 210 | #endif /* CONFIG_PM */ |
201 | 211 | ||
202 | static struct platform_driver sdhci_pltfm_driver = { | 212 | static int __init sdhci_pltfm_drv_init(void) |
203 | .driver = { | ||
204 | .name = "sdhci", | ||
205 | .owner = THIS_MODULE, | ||
206 | }, | ||
207 | .probe = sdhci_pltfm_probe, | ||
208 | .remove = __devexit_p(sdhci_pltfm_remove), | ||
209 | .id_table = sdhci_pltfm_ids, | ||
210 | .suspend = sdhci_pltfm_suspend, | ||
211 | .resume = sdhci_pltfm_resume, | ||
212 | }; | ||
213 | |||
214 | /*****************************************************************************\ | ||
215 | * * | ||
216 | * Driver init/exit * | ||
217 | * * | ||
218 | \*****************************************************************************/ | ||
219 | |||
220 | static int __init sdhci_drv_init(void) | ||
221 | { | 213 | { |
222 | return platform_driver_register(&sdhci_pltfm_driver); | 214 | pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n"); |
215 | |||
216 | return 0; | ||
223 | } | 217 | } |
218 | module_init(sdhci_pltfm_drv_init); | ||
224 | 219 | ||
225 | static void __exit sdhci_drv_exit(void) | 220 | static void __exit sdhci_pltfm_drv_exit(void) |
226 | { | 221 | { |
227 | platform_driver_unregister(&sdhci_pltfm_driver); | ||
228 | } | 222 | } |
223 | module_exit(sdhci_pltfm_drv_exit); | ||
229 | 224 | ||
230 | module_init(sdhci_drv_init); | 225 | MODULE_DESCRIPTION("SDHCI platform and OF driver helper"); |
231 | module_exit(sdhci_drv_exit); | 226 | MODULE_AUTHOR("Intel Corporation"); |
232 | |||
233 | MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver"); | ||
234 | MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); | ||
235 | MODULE_LICENSE("GPL v2"); | 227 | MODULE_LICENSE("GPL v2"); |
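Editor's note on the sdhci-pltfm rewrite above: the file stops being a standalone "sdhci" platform driver and becomes a helper library. sdhci_pltfm_init()/sdhci_pltfm_free() handle resource setup and teardown, sdhci_get_of_property() folds in the device-tree quirks, and sdhci_pltfm_register()/sdhci_pltfm_unregister() bundle the whole sequence. A hypothetical consumer ("sdhci-foo" is a placeholder name, not a file in this series) ends up as small as this, mirroring the eSDHC and Hollywood conversions earlier in the diff:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include "sdhci-pltfm.h"

	static struct sdhci_pltfm_data sdhci_foo_pdata = {
		.quirks = 0,		/* placeholder; real drivers set controller quirks */
		/* .ops left NULL: sdhci_pltfm_init() falls back to sdhci_pltfm_ops */
	};

	static int __devinit sdhci_foo_probe(struct platform_device *pdev)
	{
		/* init + OF property parsing + sdhci_add_host, with cleanup on failure */
		return sdhci_pltfm_register(pdev, &sdhci_foo_pdata);
	}

	static int __devexit sdhci_foo_remove(struct platform_device *pdev)
	{
		return sdhci_pltfm_unregister(pdev);
	}

	static struct platform_driver sdhci_foo_driver = {
		.driver = {
			.name  = "sdhci-foo",
			.owner = THIS_MODULE,
		},
		.probe  = sdhci_foo_probe,
		.remove = __devexit_p(sdhci_foo_remove),
	#ifdef CONFIG_PM
		.suspend = sdhci_pltfm_suspend,
		.resume  = sdhci_pltfm_resume,
	#endif
	};

The PM hooks can be pointed straight at the exported sdhci_pltfm_suspend/resume, exactly as the converted drivers above do.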
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 2b37016ad0ac..3a9fc3f40840 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h | |||
@@ -12,17 +12,95 @@ | |||
12 | #define _DRIVERS_MMC_SDHCI_PLTFM_H | 12 | #define _DRIVERS_MMC_SDHCI_PLTFM_H |
13 | 13 | ||
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/types.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/mmc/sdhci-pltfm.h> | 16 | #include "sdhci.h" |
17 | |||
18 | struct sdhci_pltfm_data { | ||
19 | struct sdhci_ops *ops; | ||
20 | unsigned int quirks; | ||
21 | }; | ||
17 | 22 | ||
18 | struct sdhci_pltfm_host { | 23 | struct sdhci_pltfm_host { |
19 | struct clk *clk; | 24 | struct clk *clk; |
20 | void *priv; /* to handle quirks across io-accessor calls */ | 25 | void *priv; /* to handle quirks across io-accessor calls */ |
26 | |||
27 | /* migrate from sdhci_of_host */ | ||
28 | unsigned int clock; | ||
29 | u16 xfer_mode_shadow; | ||
21 | }; | 30 | }; |
22 | 31 | ||
23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; | 32 | #ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER |
24 | extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; | 33 | /* |
25 | extern struct sdhci_pltfm_data sdhci_dove_pdata; | 34 | * These accessors are designed for big endian hosts doing I/O to |
26 | extern struct sdhci_pltfm_data sdhci_tegra_pdata; | 35 | * little endian controllers incorporating a 32-bit hardware byte swapper. |
36 | */ | ||
37 | static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) | ||
38 | { | ||
39 | return in_be32(host->ioaddr + reg); | ||
40 | } | ||
41 | |||
42 | static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) | ||
43 | { | ||
44 | return in_be16(host->ioaddr + (reg ^ 0x2)); | ||
45 | } | ||
46 | |||
47 | static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) | ||
48 | { | ||
49 | return in_8(host->ioaddr + (reg ^ 0x3)); | ||
50 | } | ||
51 | |||
52 | static inline void sdhci_be32bs_writel(struct sdhci_host *host, | ||
53 | u32 val, int reg) | ||
54 | { | ||
55 | out_be32(host->ioaddr + reg, val); | ||
56 | } | ||
57 | |||
58 | static inline void sdhci_be32bs_writew(struct sdhci_host *host, | ||
59 | u16 val, int reg) | ||
60 | { | ||
61 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
62 | int base = reg & ~0x3; | ||
63 | int shift = (reg & 0x2) * 8; | ||
64 | |||
65 | switch (reg) { | ||
66 | case SDHCI_TRANSFER_MODE: | ||
67 | /* | ||
68 | * Postpone this write, we must do it together with a | ||
69 | * command write that is down below. | ||
70 | */ | ||
71 | pltfm_host->xfer_mode_shadow = val; | ||
72 | return; | ||
73 | case SDHCI_COMMAND: | ||
74 | sdhci_be32bs_writel(host, | ||
75 | val << 16 | pltfm_host->xfer_mode_shadow, | ||
76 | SDHCI_TRANSFER_MODE); | ||
77 | return; | ||
78 | } | ||
79 | clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); | ||
80 | } | ||
81 | |||
82 | static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) | ||
83 | { | ||
84 | int base = reg & ~0x3; | ||
85 | int shift = (reg & 0x3) * 8; | ||
86 | |||
87 | clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); | ||
88 | } | ||
89 | #endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ | ||
90 | |||
91 | extern void sdhci_get_of_property(struct platform_device *pdev); | ||
92 | |||
93 | extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, | ||
94 | struct sdhci_pltfm_data *pdata); | ||
95 | extern void sdhci_pltfm_free(struct platform_device *pdev); | ||
96 | |||
97 | extern int sdhci_pltfm_register(struct platform_device *pdev, | ||
98 | struct sdhci_pltfm_data *pdata); | ||
99 | extern int sdhci_pltfm_unregister(struct platform_device *pdev); | ||
100 | |||
101 | #ifdef CONFIG_PM | ||
102 | extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state); | ||
103 | extern int sdhci_pltfm_resume(struct platform_device *dev); | ||
104 | #endif | ||
27 | 105 | ||
28 | #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ | 106 | #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ |
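Editor's note on the accessors moved into sdhci-pltfm.h above: on these big-endian hosts every 8/16-bit access is emulated with a 32-bit read-modify-write of the byte-swapped word, and SDHCI_TRANSFER_MODE (offset 0x0C) shares its 32-bit word with SDHCI_COMMAND (offset 0x0E). Committing the transfer mode on its own would also hit the command half of that word, which is the usual rationale behind the "postpone this write" comment: the value is shadowed and only written together with the command. A worked illustration with invented values:

	/* Values are made up for the example; the real ones come from the core. */
	static u32 combine_xfer_and_cmd(u16 xfer_mode_shadow, u16 cmd)
	{
		/* one out_be32() at offset 0x0C carries both 16-bit registers */
		return (u32)cmd << 16 | xfer_mode_shadow;
	}

	/* combine_xfer_and_cmd(0x0023, 0x123a) == 0x123a0023 */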
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c deleted file mode 100644 index 089c9a68b7b1..000000000000 --- a/drivers/mmc/host/sdhci-pxa.c +++ /dev/null | |||
@@ -1,303 +0,0 @@ | |||
1 | /* linux/drivers/mmc/host/sdhci-pxa.c | ||
2 | * | ||
3 | * Copyright (C) 2010 Marvell International Ltd. | ||
4 | * Zhangfei Gao <zhangfei.gao@marvell.com> | ||
5 | * Kevin Wang <dwang4@marvell.com> | ||
6 | * Mingwei Wang <mwwang@marvell.com> | ||
7 | * Philip Rakity <prakity@marvell.com> | ||
8 | * Mark Brown <markb@marvell.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | /* Supports: | ||
16 | * SDHCI support for MMP2/PXA910/PXA168 | ||
17 | * | ||
18 | * Refer to sdhci-s3c.c. | ||
19 | */ | ||
20 | |||
21 | #include <linux/delay.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/mmc/host.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <plat/sdhci.h> | ||
28 | #include "sdhci.h" | ||
29 | |||
30 | #define DRIVER_NAME "sdhci-pxa" | ||
31 | |||
32 | #define SD_FIFO_PARAM 0x104 | ||
33 | #define DIS_PAD_SD_CLK_GATE 0x400 | ||
34 | |||
35 | struct sdhci_pxa { | ||
36 | struct sdhci_host *host; | ||
37 | struct sdhci_pxa_platdata *pdata; | ||
38 | struct clk *clk; | ||
39 | struct resource *res; | ||
40 | |||
41 | u8 clk_enable; | ||
42 | }; | ||
43 | |||
44 | /*****************************************************************************\ | ||
45 | * * | ||
46 | * SDHCI core callbacks * | ||
47 | * * | ||
48 | \*****************************************************************************/ | ||
49 | static void set_clock(struct sdhci_host *host, unsigned int clock) | ||
50 | { | ||
51 | struct sdhci_pxa *pxa = sdhci_priv(host); | ||
52 | u32 tmp = 0; | ||
53 | |||
54 | if (clock == 0) { | ||
55 | if (pxa->clk_enable) { | ||
56 | clk_disable(pxa->clk); | ||
57 | pxa->clk_enable = 0; | ||
58 | } | ||
59 | } else { | ||
60 | if (0 == pxa->clk_enable) { | ||
61 | if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) { | ||
62 | tmp = readl(host->ioaddr + SD_FIFO_PARAM); | ||
63 | tmp |= DIS_PAD_SD_CLK_GATE; | ||
64 | writel(tmp, host->ioaddr + SD_FIFO_PARAM); | ||
65 | } | ||
66 | clk_enable(pxa->clk); | ||
67 | pxa->clk_enable = 1; | ||
68 | } | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) | ||
73 | { | ||
74 | u16 ctrl_2; | ||
75 | |||
76 | /* | ||
77 | * Set V18_EN -- UHS modes do not work without this. | ||
78 | * does not change signaling voltage | ||
79 | */ | ||
80 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
81 | |||
82 | /* Select Bus Speed Mode for host */ | ||
83 | ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; | ||
84 | switch (uhs) { | ||
85 | case MMC_TIMING_UHS_SDR12: | ||
86 | ctrl_2 |= SDHCI_CTRL_UHS_SDR12; | ||
87 | break; | ||
88 | case MMC_TIMING_UHS_SDR25: | ||
89 | ctrl_2 |= SDHCI_CTRL_UHS_SDR25; | ||
90 | break; | ||
91 | case MMC_TIMING_UHS_SDR50: | ||
92 | ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; | ||
93 | break; | ||
94 | case MMC_TIMING_UHS_SDR104: | ||
95 | ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; | ||
96 | break; | ||
97 | case MMC_TIMING_UHS_DDR50: | ||
98 | ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; | ||
99 | break; | ||
100 | } | ||
101 | |||
102 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | ||
103 | pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n", | ||
104 | __func__, mmc_hostname(host->mmc), uhs, ctrl_2); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static struct sdhci_ops sdhci_pxa_ops = { | ||
110 | .set_uhs_signaling = set_uhs_signaling, | ||
111 | .set_clock = set_clock, | ||
112 | }; | ||
113 | |||
114 | /*****************************************************************************\ | ||
115 | * * | ||
116 | * Device probing/removal * | ||
117 | * * | ||
118 | \*****************************************************************************/ | ||
119 | |||
120 | static int __devinit sdhci_pxa_probe(struct platform_device *pdev) | ||
121 | { | ||
122 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
123 | struct device *dev = &pdev->dev; | ||
124 | struct sdhci_host *host = NULL; | ||
125 | struct resource *iomem = NULL; | ||
126 | struct sdhci_pxa *pxa = NULL; | ||
127 | int ret, irq; | ||
128 | |||
129 | irq = platform_get_irq(pdev, 0); | ||
130 | if (irq < 0) { | ||
131 | dev_err(dev, "no irq specified\n"); | ||
132 | return irq; | ||
133 | } | ||
134 | |||
135 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
136 | if (!iomem) { | ||
137 | dev_err(dev, "no memory specified\n"); | ||
138 | return -ENOENT; | ||
139 | } | ||
140 | |||
141 | host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa)); | ||
142 | if (IS_ERR(host)) { | ||
143 | dev_err(dev, "failed to alloc host\n"); | ||
144 | return PTR_ERR(host); | ||
145 | } | ||
146 | |||
147 | pxa = sdhci_priv(host); | ||
148 | pxa->host = host; | ||
149 | pxa->pdata = pdata; | ||
150 | pxa->clk_enable = 0; | ||
151 | |||
152 | pxa->clk = clk_get(dev, "PXA-SDHCLK"); | ||
153 | if (IS_ERR(pxa->clk)) { | ||
154 | dev_err(dev, "failed to get io clock\n"); | ||
155 | ret = PTR_ERR(pxa->clk); | ||
156 | goto out; | ||
157 | } | ||
158 | |||
159 | pxa->res = request_mem_region(iomem->start, resource_size(iomem), | ||
160 | mmc_hostname(host->mmc)); | ||
161 | if (!pxa->res) { | ||
162 | dev_err(&pdev->dev, "cannot request region\n"); | ||
163 | ret = -EBUSY; | ||
164 | goto out; | ||
165 | } | ||
166 | |||
167 | host->ioaddr = ioremap(iomem->start, resource_size(iomem)); | ||
168 | if (!host->ioaddr) { | ||
169 | dev_err(&pdev->dev, "failed to remap registers\n"); | ||
170 | ret = -ENOMEM; | ||
171 | goto out; | ||
172 | } | ||
173 | |||
174 | host->hw_name = "MMC"; | ||
175 | host->ops = &sdhci_pxa_ops; | ||
176 | host->irq = irq; | ||
177 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA | ||
178 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | ||
179 | | SDHCI_QUIRK_32BIT_DMA_ADDR | ||
180 | | SDHCI_QUIRK_32BIT_DMA_SIZE | ||
181 | | SDHCI_QUIRK_32BIT_ADMA_SIZE | ||
182 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; | ||
183 | |||
184 | if (pdata->quirks) | ||
185 | host->quirks |= pdata->quirks; | ||
186 | |||
187 | /* enable 1/8V DDR capable */ | ||
188 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | ||
189 | |||
190 | /* If slot design supports 8 bit data, indicate this to MMC. */ | ||
191 | if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) | ||
192 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
193 | |||
194 | ret = sdhci_add_host(host); | ||
195 | if (ret) { | ||
196 | dev_err(&pdev->dev, "failed to add host\n"); | ||
197 | goto out; | ||
198 | } | ||
199 | |||
200 | if (pxa->pdata->max_speed) | ||
201 | host->mmc->f_max = pxa->pdata->max_speed; | ||
202 | |||
203 | platform_set_drvdata(pdev, host); | ||
204 | |||
205 | return 0; | ||
206 | out: | ||
207 | if (host) { | ||
208 | clk_put(pxa->clk); | ||
209 | if (host->ioaddr) | ||
210 | iounmap(host->ioaddr); | ||
211 | if (pxa->res) | ||
212 | release_mem_region(pxa->res->start, | ||
213 | resource_size(pxa->res)); | ||
214 | sdhci_free_host(host); | ||
215 | } | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | static int __devexit sdhci_pxa_remove(struct platform_device *pdev) | ||
221 | { | ||
222 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
223 | struct sdhci_pxa *pxa = sdhci_priv(host); | ||
224 | int dead = 0; | ||
225 | u32 scratch; | ||
226 | |||
227 | if (host) { | ||
228 | scratch = readl(host->ioaddr + SDHCI_INT_STATUS); | ||
229 | if (scratch == (u32)-1) | ||
230 | dead = 1; | ||
231 | |||
232 | sdhci_remove_host(host, dead); | ||
233 | |||
234 | if (host->ioaddr) | ||
235 | iounmap(host->ioaddr); | ||
236 | if (pxa->res) | ||
237 | release_mem_region(pxa->res->start, | ||
238 | resource_size(pxa->res)); | ||
239 | if (pxa->clk_enable) { | ||
240 | clk_disable(pxa->clk); | ||
241 | pxa->clk_enable = 0; | ||
242 | } | ||
243 | clk_put(pxa->clk); | ||
244 | |||
245 | sdhci_free_host(host); | ||
246 | platform_set_drvdata(pdev, NULL); | ||
247 | } | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | #ifdef CONFIG_PM | ||
253 | static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state) | ||
254 | { | ||
255 | struct sdhci_host *host = platform_get_drvdata(dev); | ||
256 | |||
257 | return sdhci_suspend_host(host, state); | ||
258 | } | ||
259 | |||
260 | static int sdhci_pxa_resume(struct platform_device *dev) | ||
261 | { | ||
262 | struct sdhci_host *host = platform_get_drvdata(dev); | ||
263 | |||
264 | return sdhci_resume_host(host); | ||
265 | } | ||
266 | #else | ||
267 | #define sdhci_pxa_suspend NULL | ||
268 | #define sdhci_pxa_resume NULL | ||
269 | #endif | ||
270 | |||
271 | static struct platform_driver sdhci_pxa_driver = { | ||
272 | .probe = sdhci_pxa_probe, | ||
273 | .remove = __devexit_p(sdhci_pxa_remove), | ||
274 | .suspend = sdhci_pxa_suspend, | ||
275 | .resume = sdhci_pxa_resume, | ||
276 | .driver = { | ||
277 | .name = DRIVER_NAME, | ||
278 | .owner = THIS_MODULE, | ||
279 | }, | ||
280 | }; | ||
281 | |||
282 | /*****************************************************************************\ | ||
283 | * * | ||
284 | * Driver init/exit * | ||
285 | * * | ||
286 | \*****************************************************************************/ | ||
287 | |||
288 | static int __init sdhci_pxa_init(void) | ||
289 | { | ||
290 | return platform_driver_register(&sdhci_pxa_driver); | ||
291 | } | ||
292 | |||
293 | static void __exit sdhci_pxa_exit(void) | ||
294 | { | ||
295 | platform_driver_unregister(&sdhci_pxa_driver); | ||
296 | } | ||
297 | |||
298 | module_init(sdhci_pxa_init); | ||
299 | module_exit(sdhci_pxa_exit); | ||
300 | |||
301 | MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2"); | ||
302 | MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>"); | ||
303 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c new file mode 100644 index 000000000000..38f58994f79a --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav2.c | |||
@@ -0,0 +1,244 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Marvell International Ltd. | ||
3 | * Zhangfei Gao <zhangfei.gao@marvell.com> | ||
4 | * Kevin Wang <dwang4@marvell.com> | ||
5 | * Jun Nie <njun@marvell.com> | ||
6 | * Qiming Wu <wuqm@marvell.com> | ||
7 | * Philip Rakity <prakity@marvell.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/err.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/gpio.h> | ||
26 | #include <linux/mmc/card.h> | ||
27 | #include <linux/mmc/host.h> | ||
28 | #include <linux/platform_data/pxa_sdhci.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include "sdhci.h" | ||
31 | #include "sdhci-pltfm.h" | ||
32 | |||
33 | #define SD_FIFO_PARAM 0xe0 | ||
34 | #define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */ | ||
35 | #define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */ | ||
36 | #define CLK_GATE_CTL 0x0100 /* Clock Gate Control */ | ||
37 | #define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \ | ||
38 | CLK_GATE_ON | CLK_GATE_CTL) | ||
39 | |||
40 | #define SD_CLOCK_BURST_SIZE_SETUP 0xe6 | ||
41 | #define SDCLK_SEL_SHIFT 8 | ||
42 | #define SDCLK_SEL_MASK 0x3 | ||
43 | #define SDCLK_DELAY_SHIFT 10 | ||
44 | #define SDCLK_DELAY_MASK 0x3c | ||
45 | |||
46 | #define SD_CE_ATA_2 0xea | ||
47 | #define MMC_CARD 0x1000 | ||
48 | #define MMC_WIDTH 0x0100 | ||
49 | |||
50 | static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) | ||
51 | { | ||
52 | struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); | ||
53 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
54 | |||
55 | if (mask == SDHCI_RESET_ALL) { | ||
56 | u16 tmp = 0; | ||
57 | |||
58 | /* | ||
59 | * tune timing of read data/command when crc error happen | ||
60 | * no performance impact | ||
61 | */ | ||
62 | if (pdata->clk_delay_sel == 1) { | ||
63 | tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | ||
64 | |||
65 | tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); | ||
66 | tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) | ||
67 | << SDCLK_DELAY_SHIFT; | ||
68 | tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT); | ||
69 | tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT; | ||
70 | |||
71 | writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | ||
72 | } | ||
73 | |||
74 | if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) { | ||
75 | tmp = readw(host->ioaddr + SD_FIFO_PARAM); | ||
76 | tmp &= ~CLK_GATE_SETTING_BITS; | ||
77 | writew(tmp, host->ioaddr + SD_FIFO_PARAM); | ||
78 | } else { | ||
79 | tmp = readw(host->ioaddr + SD_FIFO_PARAM); | ||
80 | tmp &= ~CLK_GATE_SETTING_BITS; | ||
81 | tmp |= CLK_GATE_SETTING_BITS; | ||
82 | writew(tmp, host->ioaddr + SD_FIFO_PARAM); | ||
83 | } | ||
84 | } | ||
85 | } | ||
86 | |||
87 | static int pxav2_mmc_set_width(struct sdhci_host *host, int width) | ||
88 | { | ||
89 | u8 ctrl; | ||
90 | u16 tmp; | ||
91 | |||
92 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | ||
93 | tmp = readw(host->ioaddr + SD_CE_ATA_2); | ||
94 | if (width == MMC_BUS_WIDTH_8) { | ||
95 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
96 | tmp |= MMC_CARD | MMC_WIDTH; | ||
97 | } else { | ||
98 | tmp &= ~(MMC_CARD | MMC_WIDTH); | ||
99 | if (width == MMC_BUS_WIDTH_4) | ||
100 | ctrl |= SDHCI_CTRL_4BITBUS; | ||
101 | else | ||
102 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
103 | } | ||
104 | writew(tmp, host->ioaddr + SD_CE_ATA_2); | ||
105 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static u32 pxav2_get_max_clock(struct sdhci_host *host) | ||
111 | { | ||
112 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
113 | |||
114 | return clk_get_rate(pltfm_host->clk); | ||
115 | } | ||
116 | |||
117 | static struct sdhci_ops pxav2_sdhci_ops = { | ||
118 | .get_max_clock = pxav2_get_max_clock, | ||
119 | .platform_reset_exit = pxav2_set_private_registers, | ||
120 | .platform_8bit_width = pxav2_mmc_set_width, | ||
121 | }; | ||
122 | |||
123 | static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) | ||
124 | { | ||
125 | struct sdhci_pltfm_host *pltfm_host; | ||
126 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
127 | struct device *dev = &pdev->dev; | ||
128 | struct sdhci_host *host = NULL; | ||
129 | struct sdhci_pxa *pxa = NULL; | ||
130 | int ret; | ||
131 | struct clk *clk; | ||
132 | |||
133 | pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); | ||
134 | if (!pxa) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | host = sdhci_pltfm_init(pdev, NULL); | ||
138 | if (IS_ERR(host)) { | ||
139 | kfree(pxa); | ||
140 | return PTR_ERR(host); | ||
141 | } | ||
142 | pltfm_host = sdhci_priv(host); | ||
143 | pltfm_host->priv = pxa; | ||
144 | |||
145 | clk = clk_get(dev, "PXA-SDHCLK"); | ||
146 | if (IS_ERR(clk)) { | ||
147 | dev_err(dev, "failed to get io clock\n"); | ||
148 | ret = PTR_ERR(clk); | ||
149 | goto err_clk_get; | ||
150 | } | ||
151 | pltfm_host->clk = clk; | ||
152 | clk_enable(clk); | ||
153 | |||
154 | host->quirks = SDHCI_QUIRK_BROKEN_ADMA | ||
155 | | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | ||
156 | | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; | ||
157 | |||
158 | if (pdata) { | ||
159 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { | ||
160 | /* on-chip device */ | ||
161 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
162 | host->mmc->caps |= MMC_CAP_NONREMOVABLE; | ||
163 | } | ||
164 | |||
165 | /* If slot design supports 8 bit data, indicate this to MMC. */ | ||
166 | if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) | ||
167 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
168 | |||
169 | if (pdata->quirks) | ||
170 | host->quirks |= pdata->quirks; | ||
171 | if (pdata->host_caps) | ||
172 | host->mmc->caps |= pdata->host_caps; | ||
173 | if (pdata->pm_caps) | ||
174 | host->mmc->pm_caps |= pdata->pm_caps; | ||
175 | } | ||
176 | |||
177 | host->ops = &pxav2_sdhci_ops; | ||
178 | |||
179 | ret = sdhci_add_host(host); | ||
180 | if (ret) { | ||
181 | dev_err(&pdev->dev, "failed to add host\n"); | ||
182 | goto err_add_host; | ||
183 | } | ||
184 | |||
185 | platform_set_drvdata(pdev, host); | ||
186 | |||
187 | return 0; | ||
188 | |||
189 | err_add_host: | ||
190 | clk_disable(clk); | ||
191 | clk_put(clk); | ||
192 | err_clk_get: | ||
193 | sdhci_pltfm_free(pdev); | ||
194 | kfree(pxa); | ||
195 | return ret; | ||
196 | } | ||
197 | |||
198 | static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) | ||
199 | { | ||
200 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
201 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
202 | struct sdhci_pxa *pxa = pltfm_host->priv; | ||
203 | |||
204 | sdhci_remove_host(host, 1); | ||
205 | |||
206 | clk_disable(pltfm_host->clk); | ||
207 | clk_put(pltfm_host->clk); | ||
208 | sdhci_pltfm_free(pdev); | ||
209 | kfree(pxa); | ||
210 | |||
211 | platform_set_drvdata(pdev, NULL); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static struct platform_driver sdhci_pxav2_driver = { | ||
217 | .driver = { | ||
218 | .name = "sdhci-pxav2", | ||
219 | .owner = THIS_MODULE, | ||
220 | }, | ||
221 | .probe = sdhci_pxav2_probe, | ||
222 | .remove = __devexit_p(sdhci_pxav2_remove), | ||
223 | #ifdef CONFIG_PM | ||
224 | .suspend = sdhci_pltfm_suspend, | ||
225 | .resume = sdhci_pltfm_resume, | ||
226 | #endif | ||
227 | }; | ||
228 | static int __init sdhci_pxav2_init(void) | ||
229 | { | ||
230 | return platform_driver_register(&sdhci_pxav2_driver); | ||
231 | } | ||
232 | |||
233 | static void __exit sdhci_pxav2_exit(void) | ||
234 | { | ||
235 | platform_driver_unregister(&sdhci_pxav2_driver); | ||
236 | } | ||
237 | |||
238 | module_init(sdhci_pxav2_init); | ||
239 | module_exit(sdhci_pxav2_exit); | ||
240 | |||
241 | MODULE_DESCRIPTION("SDHCI driver for pxav2"); | ||
242 | MODULE_AUTHOR("Marvell International Ltd."); | ||
243 | MODULE_LICENSE("GPL v2"); | ||
244 | |||
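For orientation, the sdhci-pxav2 probe above expects its configuration through platform data attached to a platform device named "sdhci-pxav2", with the bus clock registered as "PXA-SDHCLK". The sketch below is hypothetical board wiring, not part of this patch: the example_* names and the values are assumptions, while the field names are the ones the probe and reset-exit paths above consume.

/* Hypothetical board file fragment (sketch only, assumed values). */
#include <linux/platform_device.h>
#include <linux/platform_data/pxa_sdhci.h>

static struct sdhci_pxa_platdata example_sdh2_pdata = {
	.flags		  = PXA_FLAG_CARD_PERMANENT |	/* soldered eMMC, no card detect */
			    PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	.clk_delay_sel	  = 1,				/* enable the read-timing tune above */
	.clk_delay_cycles = 0x1f,			/* masked with SDCLK_DELAY_MASK by the driver */
};

static struct platform_device example_sdh2_device = {
	.name = "sdhci-pxav2",
	.id   = 0,
	.dev  = {
		.platform_data = &example_sdh2_pdata,
	},
	/* MMIO and IRQ resources, plus the "PXA-SDHCLK" clock, would be
	 * supplied by the platform; they are omitted here for brevity. */
};

The board init code would then call platform_device_register(&example_sdh2_device).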
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c new file mode 100644 index 000000000000..4198dbbc5c20 --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Marvell International Ltd. | ||
3 | * Zhangfei Gao <zhangfei.gao@marvell.com> | ||
4 | * Kevin Wang <dwang4@marvell.com> | ||
5 | * Mingwei Wang <mwwang@marvell.com> | ||
6 | * Philip Rakity <prakity@marvell.com> | ||
7 | * Mark Brown <markb@marvell.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/gpio.h> | ||
25 | #include <linux/mmc/card.h> | ||
26 | #include <linux/mmc/host.h> | ||
27 | #include <linux/platform_data/pxa_sdhci.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include "sdhci.h" | ||
31 | #include "sdhci-pltfm.h" | ||
32 | |||
33 | #define SD_CLOCK_BURST_SIZE_SETUP 0x10A | ||
34 | #define SDCLK_SEL 0x100 | ||
35 | #define SDCLK_DELAY_SHIFT 9 | ||
36 | #define SDCLK_DELAY_MASK 0x1f | ||
37 | |||
38 | #define SD_CFG_FIFO_PARAM 0x100 | ||
39 | #define SDCFG_GEN_PAD_CLK_ON (1<<6) | ||
40 | #define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF | ||
41 | #define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24 | ||
42 | |||
43 | #define SD_SPI_MODE 0x108 | ||
44 | #define SD_CE_ATA_1 0x10C | ||
45 | |||
46 | #define SD_CE_ATA_2 0x10E | ||
47 | #define SDCE_MISC_INT (1<<2) | ||
48 | #define SDCE_MISC_INT_EN (1<<1) | ||
49 | |||
50 | static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask) | ||
51 | { | ||
52 | struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); | ||
53 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
54 | |||
55 | if (mask == SDHCI_RESET_ALL) { | ||
56 | /* | ||
57 |  * Tune the timing of read data/command when a CRC error happens; | ||
58 |  * there is no performance impact. | ||
59 | */ | ||
60 | if (pdata && 0 != pdata->clk_delay_cycles) { | ||
61 | u16 tmp; | ||
62 | |||
63 | tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | ||
64 | tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) | ||
65 | << SDCLK_DELAY_SHIFT; | ||
66 | tmp |= SDCLK_SEL; | ||
67 | writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); | ||
68 | } | ||
69 | } | ||
70 | } | ||
71 | |||
72 | #define MAX_WAIT_COUNT 5 | ||
73 | static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode) | ||
74 | { | ||
75 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
76 | struct sdhci_pxa *pxa = pltfm_host->priv; | ||
77 | u16 tmp; | ||
78 | int count; | ||
79 | |||
80 | if (pxa->power_mode == MMC_POWER_UP | ||
81 | && power_mode == MMC_POWER_ON) { | ||
82 | |||
83 | dev_dbg(mmc_dev(host->mmc), | ||
84 | "%s: slot->power_mode = %d, " | ||
85 | "ios->power_mode = %d\n", | ||
86 | __func__, | ||
87 | pxa->power_mode, | ||
88 | power_mode); | ||
89 | |||
90 | /* ask to be notified when the 74 clocks have been sent */ | ||
91 | tmp = readw(host->ioaddr + SD_CE_ATA_2); | ||
92 | tmp |= SDCE_MISC_INT_EN; | ||
93 | writew(tmp, host->ioaddr + SD_CE_ATA_2); | ||
94 | |||
95 | /* start sending the 74 clocks */ | ||
96 | tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM); | ||
97 | tmp |= SDCFG_GEN_PAD_CLK_ON; | ||
98 | writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM); | ||
99 | |||
100 | /* slowest speed is about 100 kHz, i.e. 10 usec per clock */ | ||
101 | udelay(740); | ||
102 | count = 0; | ||
103 | |||
104 | while (count++ < MAX_WAIT_COUNT) { | ||
105 | if ((readw(host->ioaddr + SD_CE_ATA_2) | ||
106 | & SDCE_MISC_INT) == 0) | ||
107 | break; | ||
108 | udelay(10); | ||
109 | } | ||
110 | |||
111 | if (count == MAX_WAIT_COUNT) | ||
112 | dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n"); | ||
113 | |||
114 | /* clear the interrupt bit if posted */ | ||
115 | tmp = readw(host->ioaddr + SD_CE_ATA_2); | ||
116 | tmp |= SDCE_MISC_INT; | ||
117 | writew(tmp, host->ioaddr + SD_CE_ATA_2); | ||
118 | } | ||
119 | pxa->power_mode = power_mode; | ||
120 | } | ||
121 | |||
122 | static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) | ||
123 | { | ||
124 | u16 ctrl_2; | ||
125 | |||
126 | /* | ||
127 | * Set V18_EN -- UHS modes do not work without this. | ||
128 |  * This does not change the signaling voltage. | ||
129 | */ | ||
130 | ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | ||
131 | |||
132 | /* Select Bus Speed Mode for host */ | ||
133 | ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; | ||
134 | switch (uhs) { | ||
135 | case MMC_TIMING_UHS_SDR12: | ||
136 | ctrl_2 |= SDHCI_CTRL_UHS_SDR12; | ||
137 | break; | ||
138 | case MMC_TIMING_UHS_SDR25: | ||
139 | ctrl_2 |= SDHCI_CTRL_UHS_SDR25; | ||
140 | break; | ||
141 | case MMC_TIMING_UHS_SDR50: | ||
142 | ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; | ||
143 | break; | ||
144 | case MMC_TIMING_UHS_SDR104: | ||
145 | ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; | ||
146 | break; | ||
147 | case MMC_TIMING_UHS_DDR50: | ||
148 | ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; | ||
149 | break; | ||
150 | } | ||
151 | |||
152 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | ||
153 | dev_dbg(mmc_dev(host->mmc), | ||
154 | "%s uhs = %d, ctrl_2 = %04X\n", | ||
155 | __func__, uhs, ctrl_2); | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static struct sdhci_ops pxav3_sdhci_ops = { | ||
161 | .platform_reset_exit = pxav3_set_private_registers, | ||
162 | .set_uhs_signaling = pxav3_set_uhs_signaling, | ||
163 | .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, | ||
164 | }; | ||
165 | |||
166 | static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) | ||
167 | { | ||
168 | struct sdhci_pltfm_host *pltfm_host; | ||
169 | struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; | ||
170 | struct device *dev = &pdev->dev; | ||
171 | struct sdhci_host *host = NULL; | ||
172 | struct sdhci_pxa *pxa = NULL; | ||
173 | int ret; | ||
174 | struct clk *clk; | ||
175 | |||
176 | pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); | ||
177 | if (!pxa) | ||
178 | return -ENOMEM; | ||
179 | |||
180 | host = sdhci_pltfm_init(pdev, NULL); | ||
181 | if (IS_ERR(host)) { | ||
182 | kfree(pxa); | ||
183 | return PTR_ERR(host); | ||
184 | } | ||
185 | pltfm_host = sdhci_priv(host); | ||
186 | pltfm_host->priv = pxa; | ||
187 | |||
188 | clk = clk_get(dev, "PXA-SDHCLK"); | ||
189 | if (IS_ERR(clk)) { | ||
190 | dev_err(dev, "failed to get io clock\n"); | ||
191 | ret = PTR_ERR(clk); | ||
192 | goto err_clk_get; | ||
193 | } | ||
194 | pltfm_host->clk = clk; | ||
195 | clk_enable(clk); | ||
196 | |||
197 | host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | ||
198 | | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; | ||
199 | |||
200 | /* enable 1/8V DDR capable */ | ||
201 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | ||
202 | |||
203 | if (pdata) { | ||
204 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { | ||
205 | /* on-chip device */ | ||
206 | host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
207 | host->mmc->caps |= MMC_CAP_NONREMOVABLE; | ||
208 | } | ||
209 | |||
210 | /* If slot design supports 8 bit data, indicate this to MMC. */ | ||
211 | if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) | ||
212 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
213 | |||
214 | if (pdata->quirks) | ||
215 | host->quirks |= pdata->quirks; | ||
216 | if (pdata->host_caps) | ||
217 | host->mmc->caps |= pdata->host_caps; | ||
218 | if (pdata->pm_caps) | ||
219 | host->mmc->pm_caps |= pdata->pm_caps; | ||
220 | } | ||
221 | |||
222 | host->ops = &pxav3_sdhci_ops; | ||
223 | |||
224 | ret = sdhci_add_host(host); | ||
225 | if (ret) { | ||
226 | dev_err(&pdev->dev, "failed to add host\n"); | ||
227 | goto err_add_host; | ||
228 | } | ||
229 | |||
230 | platform_set_drvdata(pdev, host); | ||
231 | |||
232 | return 0; | ||
233 | |||
234 | err_add_host: | ||
235 | clk_disable(clk); | ||
236 | clk_put(clk); | ||
237 | err_clk_get: | ||
238 | sdhci_pltfm_free(pdev); | ||
239 | kfree(pxa); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | static int __devexit sdhci_pxav3_remove(struct platform_device *pdev) | ||
244 | { | ||
245 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
246 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
247 | struct sdhci_pxa *pxa = pltfm_host->priv; | ||
248 | |||
249 | sdhci_remove_host(host, 1); | ||
250 | |||
251 | clk_disable(pltfm_host->clk); | ||
252 | clk_put(pltfm_host->clk); | ||
253 | sdhci_pltfm_free(pdev); | ||
254 | kfree(pxa); | ||
255 | |||
256 | platform_set_drvdata(pdev, NULL); | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | static struct platform_driver sdhci_pxav3_driver = { | ||
262 | .driver = { | ||
263 | .name = "sdhci-pxav3", | ||
264 | .owner = THIS_MODULE, | ||
265 | }, | ||
266 | .probe = sdhci_pxav3_probe, | ||
267 | .remove = __devexit_p(sdhci_pxav3_remove), | ||
268 | #ifdef CONFIG_PM | ||
269 | .suspend = sdhci_pltfm_suspend, | ||
270 | .resume = sdhci_pltfm_resume, | ||
271 | #endif | ||
272 | }; | ||
273 | static int __init sdhci_pxav3_init(void) | ||
274 | { | ||
275 | return platform_driver_register(&sdhci_pxav3_driver); | ||
276 | } | ||
277 | |||
278 | static void __exit sdhci_pxav3_exit(void) | ||
279 | { | ||
280 | platform_driver_unregister(&sdhci_pxav3_driver); | ||
281 | } | ||
282 | |||
283 | module_init(sdhci_pxav3_init); | ||
284 | module_exit(sdhci_pxav3_exit); | ||
285 | |||
286 | MODULE_DESCRIPTION("SDHCI driver for pxav3"); | ||
287 | MODULE_AUTHOR("Marvell International Ltd."); | ||
288 | MODULE_LICENSE("GPL v2"); | ||
289 | |||
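A short note on the fixed delay in pxav3_gen_init_74_clocks() above: udelay(740) is simply 74 clock periods at the slowest initialization frequency, and the follow-up poll adds at most another 50 us. A back-of-the-envelope sketch, with macro names invented purely for illustration:

#define INIT_CLK_MIN_HZ		100000	/* ~100 kHz, slowest card clock during initialization */
#define INIT_CLOCKS		74
#define INIT_DELAY_US		(INIT_CLOCKS * 1000000 / INIT_CLK_MIN_HZ)	/* = 740, hence udelay(740) */
#define INIT_POLL_BUDGET_US	(5 * 10)	/* MAX_WAIT_COUNT polls of udelay(10) = 50 us */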
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 69e3ee321eb5..460ffaf0f6d7 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -612,16 +612,14 @@ static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm) | |||
612 | { | 612 | { |
613 | struct sdhci_host *host = platform_get_drvdata(dev); | 613 | struct sdhci_host *host = platform_get_drvdata(dev); |
614 | 614 | ||
615 | sdhci_suspend_host(host, pm); | 615 | return sdhci_suspend_host(host, pm); |
616 | return 0; | ||
617 | } | 616 | } |
618 | 617 | ||
619 | static int sdhci_s3c_resume(struct platform_device *dev) | 618 | static int sdhci_s3c_resume(struct platform_device *dev) |
620 | { | 619 | { |
621 | struct sdhci_host *host = platform_get_drvdata(dev); | 620 | struct sdhci_host *host = platform_get_drvdata(dev); |
622 | 621 | ||
623 | sdhci_resume_host(host); | 622 | return sdhci_resume_host(host); |
624 | return 0; | ||
625 | } | 623 | } |
626 | 624 | ||
627 | #else | 625 | #else |
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 343c97edba32..18b0bd31de78 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <mach/gpio.h> | 24 | #include <mach/gpio.h> |
25 | #include <mach/sdhci.h> | 25 | #include <mach/sdhci.h> |
26 | 26 | ||
27 | #include "sdhci.h" | ||
28 | #include "sdhci-pltfm.h" | 27 | #include "sdhci-pltfm.h" |
29 | 28 | ||
30 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) | 29 | static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) |
@@ -116,20 +115,42 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) | |||
116 | return 0; | 115 | return 0; |
117 | } | 116 | } |
118 | 117 | ||
118 | static struct sdhci_ops tegra_sdhci_ops = { | ||
119 | .get_ro = tegra_sdhci_get_ro, | ||
120 | .read_l = tegra_sdhci_readl, | ||
121 | .read_w = tegra_sdhci_readw, | ||
122 | .write_l = tegra_sdhci_writel, | ||
123 | .platform_8bit_width = tegra_sdhci_8bit, | ||
124 | }; | ||
125 | |||
126 | static struct sdhci_pltfm_data sdhci_tegra_pdata = { | ||
127 | .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | ||
128 | SDHCI_QUIRK_SINGLE_POWER_WRITE | | ||
129 | SDHCI_QUIRK_NO_HISPD_BIT | | ||
130 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, | ||
131 | .ops = &tegra_sdhci_ops, | ||
132 | }; | ||
119 | 133 | ||
120 | static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | 134 | static int __devinit sdhci_tegra_probe(struct platform_device *pdev) |
121 | struct sdhci_pltfm_data *pdata) | ||
122 | { | 135 | { |
123 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 136 | struct sdhci_pltfm_host *pltfm_host; |
124 | struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); | ||
125 | struct tegra_sdhci_platform_data *plat; | 137 | struct tegra_sdhci_platform_data *plat; |
138 | struct sdhci_host *host; | ||
126 | struct clk *clk; | 139 | struct clk *clk; |
127 | int rc; | 140 | int rc; |
128 | 141 | ||
142 | host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata); | ||
143 | if (IS_ERR(host)) | ||
144 | return PTR_ERR(host); | ||
145 | |||
146 | pltfm_host = sdhci_priv(host); | ||
147 | |||
129 | plat = pdev->dev.platform_data; | 148 | plat = pdev->dev.platform_data; |
149 | |||
130 | if (plat == NULL) { | 150 | if (plat == NULL) { |
131 | dev_err(mmc_dev(host->mmc), "missing platform data\n"); | 151 | dev_err(mmc_dev(host->mmc), "missing platform data\n"); |
132 | return -ENXIO; | 152 | rc = -ENXIO; |
153 | goto err_no_plat; | ||
133 | } | 154 | } |
134 | 155 | ||
135 | if (gpio_is_valid(plat->power_gpio)) { | 156 | if (gpio_is_valid(plat->power_gpio)) { |
@@ -137,7 +158,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
137 | if (rc) { | 158 | if (rc) { |
138 | dev_err(mmc_dev(host->mmc), | 159 | dev_err(mmc_dev(host->mmc), |
139 | "failed to allocate power gpio\n"); | 160 | "failed to allocate power gpio\n"); |
140 | goto out; | 161 | goto err_power_req; |
141 | } | 162 | } |
142 | tegra_gpio_enable(plat->power_gpio); | 163 | tegra_gpio_enable(plat->power_gpio); |
143 | gpio_direction_output(plat->power_gpio, 1); | 164 | gpio_direction_output(plat->power_gpio, 1); |
@@ -148,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
148 | if (rc) { | 169 | if (rc) { |
149 | dev_err(mmc_dev(host->mmc), | 170 | dev_err(mmc_dev(host->mmc), |
150 | "failed to allocate cd gpio\n"); | 171 | "failed to allocate cd gpio\n"); |
151 | goto out_power; | 172 | goto err_cd_req; |
152 | } | 173 | } |
153 | tegra_gpio_enable(plat->cd_gpio); | 174 | tegra_gpio_enable(plat->cd_gpio); |
154 | gpio_direction_input(plat->cd_gpio); | 175 | gpio_direction_input(plat->cd_gpio); |
@@ -159,7 +180,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
159 | 180 | ||
160 | if (rc) { | 181 | if (rc) { |
161 | dev_err(mmc_dev(host->mmc), "request irq error\n"); | 182 | dev_err(mmc_dev(host->mmc), "request irq error\n"); |
162 | goto out_cd; | 183 | goto err_cd_irq_req; |
163 | } | 184 | } |
164 | 185 | ||
165 | } | 186 | } |
@@ -169,7 +190,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
169 | if (rc) { | 190 | if (rc) { |
170 | dev_err(mmc_dev(host->mmc), | 191 | dev_err(mmc_dev(host->mmc), |
171 | "failed to allocate wp gpio\n"); | 192 | "failed to allocate wp gpio\n"); |
172 | goto out_irq; | 193 | goto err_wp_req; |
173 | } | 194 | } |
174 | tegra_gpio_enable(plat->wp_gpio); | 195 | tegra_gpio_enable(plat->wp_gpio); |
175 | gpio_direction_input(plat->wp_gpio); | 196 | gpio_direction_input(plat->wp_gpio); |
@@ -179,7 +200,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
179 | if (IS_ERR(clk)) { | 200 | if (IS_ERR(clk)) { |
180 | dev_err(mmc_dev(host->mmc), "clk err\n"); | 201 | dev_err(mmc_dev(host->mmc), "clk err\n"); |
181 | rc = PTR_ERR(clk); | 202 | rc = PTR_ERR(clk); |
182 | goto out_wp; | 203 | goto err_clk_get; |
183 | } | 204 | } |
184 | clk_enable(clk); | 205 | clk_enable(clk); |
185 | pltfm_host->clk = clk; | 206 | pltfm_host->clk = clk; |
@@ -189,38 +210,47 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, | |||
189 | if (plat->is_8bit) | 210 | if (plat->is_8bit) |
190 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | 211 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; |
191 | 212 | ||
213 | rc = sdhci_add_host(host); | ||
214 | if (rc) | ||
215 | goto err_add_host; | ||
216 | |||
192 | return 0; | 217 | return 0; |
193 | 218 | ||
194 | out_wp: | 219 | err_add_host: |
220 | clk_disable(pltfm_host->clk); | ||
221 | clk_put(pltfm_host->clk); | ||
222 | err_clk_get: | ||
195 | if (gpio_is_valid(plat->wp_gpio)) { | 223 | if (gpio_is_valid(plat->wp_gpio)) { |
196 | tegra_gpio_disable(plat->wp_gpio); | 224 | tegra_gpio_disable(plat->wp_gpio); |
197 | gpio_free(plat->wp_gpio); | 225 | gpio_free(plat->wp_gpio); |
198 | } | 226 | } |
199 | 227 | err_wp_req: | |
200 | out_irq: | ||
201 | if (gpio_is_valid(plat->cd_gpio)) | 228 | if (gpio_is_valid(plat->cd_gpio)) |
202 | free_irq(gpio_to_irq(plat->cd_gpio), host); | 229 | free_irq(gpio_to_irq(plat->cd_gpio), host); |
203 | out_cd: | 230 | err_cd_irq_req: |
204 | if (gpio_is_valid(plat->cd_gpio)) { | 231 | if (gpio_is_valid(plat->cd_gpio)) { |
205 | tegra_gpio_disable(plat->cd_gpio); | 232 | tegra_gpio_disable(plat->cd_gpio); |
206 | gpio_free(plat->cd_gpio); | 233 | gpio_free(plat->cd_gpio); |
207 | } | 234 | } |
208 | 235 | err_cd_req: | |
209 | out_power: | ||
210 | if (gpio_is_valid(plat->power_gpio)) { | 236 | if (gpio_is_valid(plat->power_gpio)) { |
211 | tegra_gpio_disable(plat->power_gpio); | 237 | tegra_gpio_disable(plat->power_gpio); |
212 | gpio_free(plat->power_gpio); | 238 | gpio_free(plat->power_gpio); |
213 | } | 239 | } |
214 | 240 | err_power_req: | |
215 | out: | 241 | err_no_plat: |
242 | sdhci_pltfm_free(pdev); | ||
216 | return rc; | 243 | return rc; |
217 | } | 244 | } |
218 | 245 | ||
219 | static void tegra_sdhci_pltfm_exit(struct sdhci_host *host) | 246 | static int __devexit sdhci_tegra_remove(struct platform_device *pdev) |
220 | { | 247 | { |
248 | struct sdhci_host *host = platform_get_drvdata(pdev); | ||
221 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 249 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
222 | struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); | ||
223 | struct tegra_sdhci_platform_data *plat; | 250 | struct tegra_sdhci_platform_data *plat; |
251 | int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); | ||
252 | |||
253 | sdhci_remove_host(host, dead); | ||
224 | 254 | ||
225 | plat = pdev->dev.platform_data; | 255 | plat = pdev->dev.platform_data; |
226 | 256 | ||
@@ -242,22 +272,37 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host) | |||
242 | 272 | ||
243 | clk_disable(pltfm_host->clk); | 273 | clk_disable(pltfm_host->clk); |
244 | clk_put(pltfm_host->clk); | 274 | clk_put(pltfm_host->clk); |
275 | |||
276 | sdhci_pltfm_free(pdev); | ||
277 | |||
278 | return 0; | ||
245 | } | 279 | } |
246 | 280 | ||
247 | static struct sdhci_ops tegra_sdhci_ops = { | 281 | static struct platform_driver sdhci_tegra_driver = { |
248 | .get_ro = tegra_sdhci_get_ro, | 282 | .driver = { |
249 | .read_l = tegra_sdhci_readl, | 283 | .name = "sdhci-tegra", |
250 | .read_w = tegra_sdhci_readw, | 284 | .owner = THIS_MODULE, |
251 | .write_l = tegra_sdhci_writel, | 285 | }, |
252 | .platform_8bit_width = tegra_sdhci_8bit, | 286 | .probe = sdhci_tegra_probe, |
287 | .remove = __devexit_p(sdhci_tegra_remove), | ||
288 | #ifdef CONFIG_PM | ||
289 | .suspend = sdhci_pltfm_suspend, | ||
290 | .resume = sdhci_pltfm_resume, | ||
291 | #endif | ||
253 | }; | 292 | }; |
254 | 293 | ||
255 | struct sdhci_pltfm_data sdhci_tegra_pdata = { | 294 | static int __init sdhci_tegra_init(void) |
256 | .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | | 295 | { |
257 | SDHCI_QUIRK_SINGLE_POWER_WRITE | | 296 | return platform_driver_register(&sdhci_tegra_driver); |
258 | SDHCI_QUIRK_NO_HISPD_BIT | | 297 | } |
259 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, | 298 | module_init(sdhci_tegra_init); |
260 | .ops = &tegra_sdhci_ops, | 299 | |
261 | .init = tegra_sdhci_pltfm_init, | 300 | static void __exit sdhci_tegra_exit(void) |
262 | .exit = tegra_sdhci_pltfm_exit, | 301 | { |
263 | }; | 302 | platform_driver_unregister(&sdhci_tegra_driver); |
303 | } | ||
304 | module_exit(sdhci_tegra_exit); | ||
305 | |||
306 | MODULE_DESCRIPTION("SDHCI driver for Tegra"); | ||
307 | MODULE_AUTHOR("Google, Inc."); | ||
308 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 58d5436ff649..c31a3343340d 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) | |||
127 | 127 | ||
128 | static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) | 128 | static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) |
129 | { | 129 | { |
130 | u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; | 130 | u32 present, irqs; |
131 | 131 | ||
132 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 132 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) |
133 | return; | 133 | return; |
134 | 134 | ||
135 | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | ||
136 | SDHCI_CARD_PRESENT; | ||
137 | irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; | ||
138 | |||
135 | if (enable) | 139 | if (enable) |
136 | sdhci_unmask_irqs(host, irqs); | 140 | sdhci_unmask_irqs(host, irqs); |
137 | else | 141 | else |
@@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
2154 | mmc_hostname(host->mmc), intmask); | 2158 | mmc_hostname(host->mmc), intmask); |
2155 | 2159 | ||
2156 | if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { | 2160 | if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { |
2161 | u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | ||
2162 | SDHCI_CARD_PRESENT; | ||
2163 | |||
2164 | /* | ||
2165 | * There is an observation on i.MX eSDHC: the INSERT bit is | ||
2166 | * immediately set again as soon as it is cleared while a card | ||
2167 | * is inserted. We have to mask the irq to prevent an interrupt | ||
2168 | * storm that would freeze the system, and REMOVE shows the | ||
2169 | * same behaviour. | ||
2170 | * | ||
2171 | * More testing is needed here to ensure this works for other | ||
2172 | * platforms though. | ||
2173 | */ | ||
2174 | sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT : | ||
2175 | SDHCI_INT_CARD_REMOVE); | ||
2176 | sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE : | ||
2177 | SDHCI_INT_CARD_INSERT); | ||
2178 | |||
2157 | sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | | 2179 | sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | |
2158 | SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); | 2180 | SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); |
2181 | intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); | ||
2159 | tasklet_schedule(&host->card_tasklet); | 2182 | tasklet_schedule(&host->card_tasklet); |
2160 | } | 2183 | } |
2161 | 2184 | ||
2162 | intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); | ||
2163 | |||
2164 | if (intmask & SDHCI_INT_CMD_MASK) { | 2185 | if (intmask & SDHCI_INT_CMD_MASK) { |
2165 | sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, | 2186 | sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, |
2166 | SDHCI_INT_STATUS); | 2187 | SDHCI_INT_STATUS); |
@@ -2488,6 +2509,11 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2488 | } else | 2509 | } else |
2489 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; | 2510 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
2490 | 2511 | ||
2512 | if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) | ||
2513 | mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); | ||
2514 | else | ||
2515 | mmc->max_discard_to = (1 << 27) / host->timeout_clk; | ||
2516 | |||
2491 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; | 2517 | mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; |
2492 | 2518 | ||
2493 | if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) | 2519 | if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) |
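The max_discard_to hunk above derives the maximum discard timeout from the SDHCI data timeout counter, which can count at most 2^27 timeout-clock cycles; dividing that by the clock rate in kHz gives the limit in milliseconds. A quick worked example, assuming the SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK branch and a hypothetical 50 MHz f_max:

/* Illustrative numbers only; mirrors the first branch of the hunk above. */
unsigned int f_max = 50000000;				/* assumed SD clock, in Hz */
unsigned int max_ms = (1 << 27) / (f_max / 1000);	/* 134217728 / 50000 = 2684 ms, roughly 2.7 s */

The other branch divides by host->timeout_clk instead, but the arithmetic is the same.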
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 14f8edbaa195..557886bee9ce 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -175,6 +175,7 @@ struct sh_mmcif_host { | |||
175 | enum mmcif_state state; | 175 | enum mmcif_state state; |
176 | spinlock_t lock; | 176 | spinlock_t lock; |
177 | bool power; | 177 | bool power; |
178 | bool card_present; | ||
178 | 179 | ||
179 | /* DMA support */ | 180 | /* DMA support */ |
180 | struct dma_chan *chan_rx; | 181 | struct dma_chan *chan_rx; |
@@ -877,23 +878,23 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
877 | spin_unlock_irqrestore(&host->lock, flags); | 878 | spin_unlock_irqrestore(&host->lock, flags); |
878 | 879 | ||
879 | if (ios->power_mode == MMC_POWER_UP) { | 880 | if (ios->power_mode == MMC_POWER_UP) { |
880 | if (p->set_pwr) | 881 | if (!host->card_present) { |
881 | p->set_pwr(host->pd, ios->power_mode); | ||
882 | if (!host->power) { | ||
883 | /* See if we also get DMA */ | 882 | /* See if we also get DMA */ |
884 | sh_mmcif_request_dma(host, host->pd->dev.platform_data); | 883 | sh_mmcif_request_dma(host, host->pd->dev.platform_data); |
885 | pm_runtime_get_sync(&host->pd->dev); | 884 | host->card_present = true; |
886 | host->power = true; | ||
887 | } | 885 | } |
888 | } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | 886 | } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { |
889 | /* clock stop */ | 887 | /* clock stop */ |
890 | sh_mmcif_clock_control(host, 0); | 888 | sh_mmcif_clock_control(host, 0); |
891 | if (ios->power_mode == MMC_POWER_OFF) { | 889 | if (ios->power_mode == MMC_POWER_OFF) { |
892 | if (host->power) { | 890 | if (host->card_present) { |
893 | pm_runtime_put(&host->pd->dev); | ||
894 | sh_mmcif_release_dma(host); | 891 | sh_mmcif_release_dma(host); |
895 | host->power = false; | 892 | host->card_present = false; |
896 | } | 893 | } |
894 | } | ||
895 | if (host->power) { | ||
896 | pm_runtime_put(&host->pd->dev); | ||
897 | host->power = false; | ||
897 | if (p->down_pwr) | 898 | if (p->down_pwr) |
898 | p->down_pwr(host->pd); | 899 | p->down_pwr(host->pd); |
899 | } | 900 | } |
@@ -901,8 +902,16 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
901 | return; | 902 | return; |
902 | } | 903 | } |
903 | 904 | ||
904 | if (ios->clock) | 905 | if (ios->clock) { |
906 | if (!host->power) { | ||
907 | if (p->set_pwr) | ||
908 | p->set_pwr(host->pd, ios->power_mode); | ||
909 | pm_runtime_get_sync(&host->pd->dev); | ||
910 | host->power = true; | ||
911 | sh_mmcif_sync_reset(host); | ||
912 | } | ||
905 | sh_mmcif_clock_control(host, ios->clock); | 913 | sh_mmcif_clock_control(host, ios->clock); |
914 | } | ||
906 | 915 | ||
907 | host->bus_width = ios->bus_width; | 916 | host->bus_width = ios->bus_width; |
908 | host->state = STATE_IDLE; | 917 | host->state = STATE_IDLE; |
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index ce500f03df85..774f6439d7ce 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/mmc/sh_mobile_sdhi.h> | 26 | #include <linux/mmc/sh_mobile_sdhi.h> |
27 | #include <linux/mfd/tmio.h> | 27 | #include <linux/mfd/tmio.h> |
28 | #include <linux/sh_dma.h> | 28 | #include <linux/sh_dma.h> |
29 | #include <linux/delay.h> | ||
29 | 30 | ||
30 | #include "tmio_mmc.h" | 31 | #include "tmio_mmc.h" |
31 | 32 | ||
@@ -55,6 +56,39 @@ static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) | |||
55 | return -ENOSYS; | 56 | return -ENOSYS; |
56 | } | 57 | } |
57 | 58 | ||
59 | static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) | ||
60 | { | ||
61 | int timeout = 1000; | ||
62 | |||
63 | while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13))) | ||
64 | udelay(1); | ||
65 | |||
66 | if (!timeout) { | ||
67 | dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n"); | ||
68 | return -EBUSY; | ||
69 | } | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) | ||
75 | { | ||
76 | switch (addr) | ||
77 | { | ||
78 | case CTL_SD_CMD: | ||
79 | case CTL_STOP_INTERNAL_ACTION: | ||
80 | case CTL_XFER_BLK_COUNT: | ||
81 | case CTL_SD_CARD_CLK_CTL: | ||
82 | case CTL_SD_XFER_LEN: | ||
83 | case CTL_SD_MEM_CARD_OPT: | ||
84 | case CTL_TRANSACTION_CTL: | ||
85 | case CTL_DMA_ENABLE: | ||
86 | return sh_mobile_sdhi_wait_idle(host); | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
58 | static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | 92 | static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) |
59 | { | 93 | { |
60 | struct sh_mobile_sdhi *priv; | 94 | struct sh_mobile_sdhi *priv; |
@@ -86,6 +120,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
86 | mmc_data->hclk = clk_get_rate(priv->clk); | 120 | mmc_data->hclk = clk_get_rate(priv->clk); |
87 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; | 121 | mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; |
88 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; | 122 | mmc_data->get_cd = sh_mobile_sdhi_get_cd; |
123 | if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) | ||
124 | mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; | ||
89 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | 125 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; |
90 | if (p) { | 126 | if (p) { |
91 | mmc_data->flags = p->tmio_flags; | 127 | mmc_data->flags = p->tmio_flags; |
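Two small observations on the idle-wait support added above. First, the stall it can introduce is bounded: sh_mobile_sdhi_wait_idle() polls bit 13 of CTL_STATUS2 once per microsecond for at most 1000 iterations, so the worst case is roughly 1 ms before it warns and returns -EBUSY. Second, a platform opts in by setting TMIO_MMC_HAS_IDLE_WAIT in its tmio flags; a minimal sketch, assuming the sh_mobile_sdhi_info platform data type from <linux/mmc/sh_mobile_sdhi.h> and a hypothetical board:

#include <linux/mfd/tmio.h>
#include <linux/mmc/sh_mobile_sdhi.h>

static struct sh_mobile_sdhi_info example_sdhi_info = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,	/* the probe above then installs write16_hook */
};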
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 8260bc2c34e3..087d88023ba1 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
20 | #include <linux/mmc/tmio.h> | 20 | #include <linux/mmc/tmio.h> |
21 | #include <linux/mutex.h> | ||
21 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
22 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
23 | 24 | ||
@@ -52,6 +53,8 @@ struct tmio_mmc_host { | |||
52 | void (*set_clk_div)(struct platform_device *host, int state); | 53 | void (*set_clk_div)(struct platform_device *host, int state); |
53 | 54 | ||
54 | int pm_error; | 55 | int pm_error; |
56 | /* recognise system-wide suspend in runtime PM methods */ | ||
57 | bool pm_global; | ||
55 | 58 | ||
56 | /* pio related stuff */ | 59 | /* pio related stuff */ |
57 | struct scatterlist *sg_ptr; | 60 | struct scatterlist *sg_ptr; |
@@ -73,8 +76,11 @@ struct tmio_mmc_host { | |||
73 | 76 | ||
74 | /* Track lost interrupts */ | 77 | /* Track lost interrupts */ |
75 | struct delayed_work delayed_reset_work; | 78 | struct delayed_work delayed_reset_work; |
76 | spinlock_t lock; | 79 | struct work_struct done; |
80 | |||
81 | spinlock_t lock; /* protect host private data */ | ||
77 | unsigned long last_req_ts; | 82 | unsigned long last_req_ts; |
83 | struct mutex ios_lock; /* protect set_ios() context */ | ||
78 | }; | 84 | }; |
79 | 85 | ||
80 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | 86 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, |
@@ -103,6 +109,7 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | |||
103 | 109 | ||
104 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) | 110 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) |
105 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | 111 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); |
112 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); | ||
106 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | 113 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); |
107 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | 114 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); |
108 | #else | 115 | #else |
@@ -111,6 +118,10 @@ static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | |||
111 | { | 118 | { |
112 | } | 119 | } |
113 | 120 | ||
121 | static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
122 | { | ||
123 | } | ||
124 | |||
114 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | 125 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, |
115 | struct tmio_mmc_data *pdata) | 126 | struct tmio_mmc_data *pdata) |
116 | { | 127 | { |
@@ -134,4 +145,44 @@ int tmio_mmc_host_resume(struct device *dev); | |||
134 | int tmio_mmc_host_runtime_suspend(struct device *dev); | 145 | int tmio_mmc_host_runtime_suspend(struct device *dev); |
135 | int tmio_mmc_host_runtime_resume(struct device *dev); | 146 | int tmio_mmc_host_runtime_resume(struct device *dev); |
136 | 147 | ||
148 | static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
149 | { | ||
150 | return readw(host->ctl + (addr << host->bus_shift)); | ||
151 | } | ||
152 | |||
153 | static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
154 | u16 *buf, int count) | ||
155 | { | ||
156 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
157 | } | ||
158 | |||
159 | static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
160 | { | ||
161 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
162 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
163 | } | ||
164 | |||
165 | static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
166 | { | ||
167 | /* If there is a hook and it returns non-zero, then there | ||
168 |  * is an error and the write should be skipped. | ||
169 | */ | ||
170 | if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr)) | ||
171 | return; | ||
172 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
173 | } | ||
174 | |||
175 | static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
176 | u16 *buf, int count) | ||
177 | { | ||
178 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
179 | } | ||
180 | |||
181 | static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
182 | { | ||
183 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
184 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
185 | } | ||
186 | |||
187 | |||
137 | #endif | 188 | #endif |
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 25f1ad6cbe09..2aa616dec32d 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -22,11 +22,14 @@ | |||
22 | 22 | ||
23 | #define TMIO_MMC_MIN_DMA_LEN 8 | 23 | #define TMIO_MMC_MIN_DMA_LEN 8 |
24 | 24 | ||
25 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | 25 | void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) |
26 | { | 26 | { |
27 | if (!host->chan_tx || !host->chan_rx) | ||
28 | return; | ||
29 | |||
27 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | 30 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) |
28 | /* Switch DMA mode on or off - SuperH specific? */ | 31 | /* Switch DMA mode on or off - SuperH specific? */ |
29 | writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); | 32 | sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0); |
30 | #endif | 33 | #endif |
31 | } | 34 | } |
32 | 35 | ||
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 0b09e8239aa0..1f16357e7301 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -46,40 +46,6 @@ | |||
46 | 46 | ||
47 | #include "tmio_mmc.h" | 47 | #include "tmio_mmc.h" |
48 | 48 | ||
49 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
50 | { | ||
51 | return readw(host->ctl + (addr << host->bus_shift)); | ||
52 | } | ||
53 | |||
54 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
55 | u16 *buf, int count) | ||
56 | { | ||
57 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
58 | } | ||
59 | |||
60 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
61 | { | ||
62 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
63 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
64 | } | ||
65 | |||
66 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
67 | { | ||
68 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
69 | } | ||
70 | |||
71 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
72 | u16 *buf, int count) | ||
73 | { | ||
74 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
75 | } | ||
76 | |||
77 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
78 | { | ||
79 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
80 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
81 | } | ||
82 | |||
83 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | 49 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) |
84 | { | 50 | { |
85 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); | 51 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); |
@@ -284,10 +250,16 @@ static void tmio_mmc_reset_work(struct work_struct *work) | |||
284 | /* called with host->lock held, interrupts disabled */ | 250 | /* called with host->lock held, interrupts disabled */ |
285 | static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | 251 | static void tmio_mmc_finish_request(struct tmio_mmc_host *host) |
286 | { | 252 | { |
287 | struct mmc_request *mrq = host->mrq; | 253 | struct mmc_request *mrq; |
254 | unsigned long flags; | ||
255 | |||
256 | spin_lock_irqsave(&host->lock, flags); | ||
288 | 257 | ||
289 | if (!mrq) | 258 | mrq = host->mrq; |
259 | if (IS_ERR_OR_NULL(mrq)) { | ||
260 | spin_unlock_irqrestore(&host->lock, flags); | ||
290 | return; | 261 | return; |
262 | } | ||
291 | 263 | ||
292 | host->cmd = NULL; | 264 | host->cmd = NULL; |
293 | host->data = NULL; | 265 | host->data = NULL; |
@@ -296,11 +268,18 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | |||
296 | cancel_delayed_work(&host->delayed_reset_work); | 268 | cancel_delayed_work(&host->delayed_reset_work); |
297 | 269 | ||
298 | host->mrq = NULL; | 270 | host->mrq = NULL; |
271 | spin_unlock_irqrestore(&host->lock, flags); | ||
299 | 272 | ||
300 | /* FIXME: mmc_request_done() can schedule! */ | ||
301 | mmc_request_done(host->mmc, mrq); | 273 | mmc_request_done(host->mmc, mrq); |
302 | } | 274 | } |
303 | 275 | ||
276 | static void tmio_mmc_done_work(struct work_struct *work) | ||
277 | { | ||
278 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
279 | done); | ||
280 | tmio_mmc_finish_request(host); | ||
281 | } | ||
282 | |||
304 | /* These are the bitmasks the tmio chip requires to implement the MMC response | 283 | /* These are the bitmasks the tmio chip requires to implement the MMC response |
305 | * types. Note that R1 and R6 are the same in this scheme. */ | 284 | * types. Note that R1 and R6 are the same in this scheme. */ |
306 | #define APP_CMD 0x0040 | 285 | #define APP_CMD 0x0040 |
@@ -467,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | |||
467 | BUG(); | 446 | BUG(); |
468 | } | 447 | } |
469 | 448 | ||
470 | tmio_mmc_finish_request(host); | 449 | schedule_work(&host->done); |
471 | } | 450 | } |
472 | 451 | ||
473 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | 452 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) |
@@ -557,7 +536,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | |||
557 | tasklet_schedule(&host->dma_issue); | 536 | tasklet_schedule(&host->dma_issue); |
558 | } | 537 | } |
559 | } else { | 538 | } else { |
560 | tmio_mmc_finish_request(host); | 539 | schedule_work(&host->done); |
561 | } | 540 | } |
562 | 541 | ||
563 | out: | 542 | out: |
@@ -567,6 +546,7 @@ out: | |||
567 | irqreturn_t tmio_mmc_irq(int irq, void *devid) | 546 | irqreturn_t tmio_mmc_irq(int irq, void *devid) |
568 | { | 547 | { |
569 | struct tmio_mmc_host *host = devid; | 548 | struct tmio_mmc_host *host = devid; |
549 | struct mmc_host *mmc = host->mmc; | ||
570 | struct tmio_mmc_data *pdata = host->pdata; | 550 | struct tmio_mmc_data *pdata = host->pdata; |
571 | unsigned int ireg, irq_mask, status; | 551 | unsigned int ireg, irq_mask, status; |
572 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | 552 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; |
@@ -588,13 +568,13 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) | |||
588 | if (sdio_ireg && !host->sdio_irq_enabled) { | 568 | if (sdio_ireg && !host->sdio_irq_enabled) { |
589 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | 569 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", |
590 | sdio_status, sdio_irq_mask, sdio_ireg); | 570 | sdio_status, sdio_irq_mask, sdio_ireg); |
591 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | 571 | tmio_mmc_enable_sdio_irq(mmc, 0); |
592 | goto out; | 572 | goto out; |
593 | } | 573 | } |
594 | 574 | ||
595 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | 575 | if (mmc->caps & MMC_CAP_SDIO_IRQ && |
596 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | 576 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) |
597 | mmc_signal_sdio_irq(host->mmc); | 577 | mmc_signal_sdio_irq(mmc); |
598 | 578 | ||
599 | if (sdio_ireg) | 579 | if (sdio_ireg) |
600 | goto out; | 580 | goto out; |
@@ -603,58 +583,49 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) | |||
603 | pr_debug_status(status); | 583 | pr_debug_status(status); |
604 | pr_debug_status(ireg); | 584 | pr_debug_status(ireg); |
605 | 585 | ||
606 | if (!ireg) { | 586 | /* Card insert / remove attempts */ |
607 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); | 587 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { |
608 | 588 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | |
609 | pr_warning("tmio_mmc: Spurious irq, disabling! " | 589 | TMIO_STAT_CARD_REMOVE); |
610 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | 590 | if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) || |
611 | pr_debug_status(status); | 591 | ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && |
612 | 592 | !work_pending(&mmc->detect.work)) | |
593 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
613 | goto out; | 594 | goto out; |
614 | } | 595 | } |
615 | 596 | ||
616 | while (ireg) { | 597 | /* CRC and other errors */ |
617 | /* Card insert / remove attempts */ | 598 | /* if (ireg & TMIO_STAT_ERR_IRQ) |
618 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | 599 | * handled |= tmio_error_irq(host, irq, stat); |
619 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
620 | TMIO_STAT_CARD_REMOVE); | ||
621 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
622 | } | ||
623 | |||
624 | /* CRC and other errors */ | ||
625 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
626 | * handled |= tmio_error_irq(host, irq, stat); | ||
627 | */ | 600 | */ |
628 | 601 | ||
629 | /* Command completion */ | 602 | /* Command completion */ |
630 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | 603 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { |
631 | tmio_mmc_ack_mmc_irqs(host, | 604 | tmio_mmc_ack_mmc_irqs(host, |
632 | TMIO_STAT_CMDRESPEND | | 605 | TMIO_STAT_CMDRESPEND | |
633 | TMIO_STAT_CMDTIMEOUT); | 606 | TMIO_STAT_CMDTIMEOUT); |
634 | tmio_mmc_cmd_irq(host, status); | 607 | tmio_mmc_cmd_irq(host, status); |
635 | } | 608 | goto out; |
636 | 609 | } | |
637 | /* Data transfer */ | ||
638 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
639 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
640 | tmio_mmc_pio_irq(host); | ||
641 | } | ||
642 | |||
643 | /* Data transfer completion */ | ||
644 | if (ireg & TMIO_STAT_DATAEND) { | ||
645 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
646 | tmio_mmc_data_irq(host); | ||
647 | } | ||
648 | 610 | ||
649 | /* Check status - keep going until we've handled it all */ | 611 | /* Data transfer */ |
650 | status = sd_ctrl_read32(host, CTL_STATUS); | 612 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { |
651 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | 613 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); |
652 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | 614 | tmio_mmc_pio_irq(host); |
615 | goto out; | ||
616 | } | ||
653 | 617 | ||
654 | pr_debug("Status at end of loop: %08x\n", status); | 618 | /* Data transfer completion */ |
655 | pr_debug_status(status); | 619 | if (ireg & TMIO_STAT_DATAEND) { |
620 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
621 | tmio_mmc_data_irq(host); | ||
622 | goto out; | ||
656 | } | 623 | } |
657 | pr_debug("MMC IRQ end\n"); | 624 | |
625 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
626 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
627 | pr_debug_status(status); | ||
628 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); | ||
658 | 629 | ||
659 | out: | 630 | out: |
660 | return IRQ_HANDLED; | 631 | return IRQ_HANDLED; |
@@ -749,6 +720,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
749 | struct tmio_mmc_data *pdata = host->pdata; | 720 | struct tmio_mmc_data *pdata = host->pdata; |
750 | unsigned long flags; | 721 | unsigned long flags; |
751 | 722 | ||
723 | mutex_lock(&host->ios_lock); | ||
724 | |||
752 | spin_lock_irqsave(&host->lock, flags); | 725 | spin_lock_irqsave(&host->lock, flags); |
753 | if (host->mrq) { | 726 | if (host->mrq) { |
754 | if (IS_ERR(host->mrq)) { | 727 | if (IS_ERR(host->mrq)) { |
@@ -764,6 +737,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
764 | host->mrq->cmd->opcode, host->last_req_ts, jiffies); | 737 | host->mrq->cmd->opcode, host->last_req_ts, jiffies); |
765 | } | 738 | } |
766 | spin_unlock_irqrestore(&host->lock, flags); | 739 | spin_unlock_irqrestore(&host->lock, flags); |
740 | |||
741 | mutex_unlock(&host->ios_lock); | ||
767 | return; | 742 | return; |
768 | } | 743 | } |
769 | 744 | ||
@@ -771,33 +746,30 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
771 | 746 | ||
772 | spin_unlock_irqrestore(&host->lock, flags); | 747 | spin_unlock_irqrestore(&host->lock, flags); |
773 | 748 | ||
774 | if (ios->clock) | 749 | /* |
775 | tmio_mmc_set_clock(host, ios->clock); | 750 | * pdata->power == false only if COLD_CD is available, otherwise only |
776 | 751 | * in short time intervals during probing or resuming | |
777 | /* Power sequence - OFF -> UP -> ON */ | 752 | */ |
778 | if (ios->power_mode == MMC_POWER_UP) { | 753 | if (ios->power_mode == MMC_POWER_ON && ios->clock) { |
779 | if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) { | 754 | if (!pdata->power) { |
780 | pm_runtime_get_sync(&host->pdev->dev); | 755 | pm_runtime_get_sync(&host->pdev->dev); |
781 | pdata->power = true; | 756 | pdata->power = true; |
782 | } | 757 | } |
758 | tmio_mmc_set_clock(host, ios->clock); | ||
783 | /* power up SD bus */ | 759 | /* power up SD bus */ |
784 | if (host->set_pwr) | 760 | if (host->set_pwr) |
785 | host->set_pwr(host->pdev, 1); | 761 | host->set_pwr(host->pdev, 1); |
786 | } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | ||
787 | /* power down SD bus */ | ||
788 | if (ios->power_mode == MMC_POWER_OFF) { | ||
789 | if (host->set_pwr) | ||
790 | host->set_pwr(host->pdev, 0); | ||
791 | if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && | ||
792 | pdata->power) { | ||
793 | pdata->power = false; | ||
794 | pm_runtime_put(&host->pdev->dev); | ||
795 | } | ||
796 | } | ||
797 | tmio_mmc_clk_stop(host); | ||
798 | } else { | ||
799 | /* start bus clock */ | 762 | /* start bus clock */ |
800 | tmio_mmc_clk_start(host); | 763 | tmio_mmc_clk_start(host); |
764 | } else if (ios->power_mode != MMC_POWER_UP) { | ||
765 | if (host->set_pwr) | ||
766 | host->set_pwr(host->pdev, 0); | ||
767 | if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && | ||
768 | pdata->power) { | ||
769 | pdata->power = false; | ||
770 | pm_runtime_put(&host->pdev->dev); | ||
771 | } | ||
772 | tmio_mmc_clk_stop(host); | ||
801 | } | 773 | } |
802 | 774 | ||
803 | switch (ios->bus_width) { | 775 | switch (ios->bus_width) { |
@@ -817,6 +789,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
817 | current->comm, task_pid_nr(current), | 789 | current->comm, task_pid_nr(current), |
818 | ios->clock, ios->power_mode); | 790 | ios->clock, ios->power_mode); |
819 | host->mrq = NULL; | 791 | host->mrq = NULL; |
792 | |||
793 | mutex_unlock(&host->ios_lock); | ||
820 | } | 794 | } |
821 | 795 | ||
822 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | 796 | static int tmio_mmc_get_ro(struct mmc_host *mmc) |
@@ -913,16 +887,20 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | |||
913 | tmio_mmc_enable_sdio_irq(mmc, 0); | 887 | tmio_mmc_enable_sdio_irq(mmc, 0); |
914 | 888 | ||
915 | spin_lock_init(&_host->lock); | 889 | spin_lock_init(&_host->lock); |
890 | mutex_init(&_host->ios_lock); | ||
916 | 891 | ||
917 | /* Init delayed work for request timeouts */ | 892 | /* Init delayed work for request timeouts */ |
918 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); | 893 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); |
894 | INIT_WORK(&_host->done, tmio_mmc_done_work); | ||
919 | 895 | ||
920 | /* See if we also get DMA */ | 896 | /* See if we also get DMA */ |
921 | tmio_mmc_request_dma(_host, pdata); | 897 | tmio_mmc_request_dma(_host, pdata); |
922 | 898 | ||
923 | /* We have to keep the device powered for its card detection to work */ | 899 | /* We have to keep the device powered for its card detection to work */ |
924 | if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) | 900 | if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) { |
901 | pdata->power = true; | ||
925 | pm_runtime_get_noresume(&pdev->dev); | 902 | pm_runtime_get_noresume(&pdev->dev); |
903 | } | ||
926 | 904 | ||
927 | mmc_add_host(mmc); | 905 | mmc_add_host(mmc); |
928 | 906 | ||
@@ -963,6 +941,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) | |||
963 | pm_runtime_get_sync(&pdev->dev); | 941 | pm_runtime_get_sync(&pdev->dev); |
964 | 942 | ||
965 | mmc_remove_host(host->mmc); | 943 | mmc_remove_host(host->mmc); |
944 | cancel_work_sync(&host->done); | ||
966 | cancel_delayed_work_sync(&host->delayed_reset_work); | 945 | cancel_delayed_work_sync(&host->delayed_reset_work); |
967 | tmio_mmc_release_dma(host); | 946 | tmio_mmc_release_dma(host); |
968 | 947 | ||
@@ -998,11 +977,16 @@ int tmio_mmc_host_resume(struct device *dev) | |||
998 | /* The MMC core will perform the complete set up */ | 977 | /* The MMC core will perform the complete set up */ |
999 | host->pdata->power = false; | 978 | host->pdata->power = false; |
1000 | 979 | ||
980 | host->pm_global = true; | ||
1001 | if (!host->pm_error) | 981 | if (!host->pm_error) |
1002 | pm_runtime_get_sync(dev); | 982 | pm_runtime_get_sync(dev); |
1003 | 983 | ||
1004 | tmio_mmc_reset(mmc_priv(mmc)); | 984 | if (host->pm_global) { |
1005 | tmio_mmc_request_dma(host, host->pdata); | 985 | /* Runtime PM resume callback didn't run */ |
986 | tmio_mmc_reset(host); | ||
987 | tmio_mmc_enable_dma(host, true); | ||
988 | host->pm_global = false; | ||
989 | } | ||
1006 | 990 | ||
1007 | return mmc_resume_host(mmc); | 991 | return mmc_resume_host(mmc); |
1008 | } | 992 | } |
@@ -1023,12 +1007,15 @@ int tmio_mmc_host_runtime_resume(struct device *dev) | |||
1023 | struct tmio_mmc_data *pdata = host->pdata; | 1007 | struct tmio_mmc_data *pdata = host->pdata; |
1024 | 1008 | ||
1025 | tmio_mmc_reset(host); | 1009 | tmio_mmc_reset(host); |
1010 | tmio_mmc_enable_dma(host, true); | ||
1026 | 1011 | ||
1027 | if (pdata->power) { | 1012 | if (pdata->power) { |
1028 | /* Only entered after a card-insert interrupt */ | 1013 | /* Only entered after a card-insert interrupt */ |
1029 | tmio_mmc_set_ios(mmc, &mmc->ios); | 1014 | if (!mmc->card) |
1015 | tmio_mmc_set_ios(mmc, &mmc->ios); | ||
1030 | mmc_detect_change(mmc, msecs_to_jiffies(100)); | 1016 | mmc_detect_change(mmc, msecs_to_jiffies(100)); |
1031 | } | 1017 | } |
1018 | host->pm_global = false; | ||
1032 | 1019 | ||
1033 | return 0; | 1020 | return 0; |
1034 | } | 1021 | } |
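A structural note on the tmio_mmc_pio.c changes above: request completion moves out of hard-IRQ context. The removed FIXME warned that mmc_request_done() can schedule, so the command and data IRQ paths now queue host->done and tmio_mmc_finish_request() claims host->mrq under host->lock from the work item. A generic, self-contained sketch of that deferral pattern follows, with made-up demo_* names; it is not code from this patch:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

struct demo_host {
	spinlock_t lock;
	struct work_struct done;	/* INIT_WORK(&host->done, demo_done_work) in probe */
	struct mmc_request *mrq;	/* in-flight request, protected by lock */
	struct mmc_host *mmc;
};

/* Process context: safe to call mmc_request_done(), which may schedule. */
static void demo_done_work(struct work_struct *work)
{
	struct demo_host *host = container_of(work, struct demo_host, done);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;		/* claim the request exactly once */
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}

/* Hard IRQ context: acknowledge the hardware, then defer the completion. */
static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_host *host = dev_id;

	schedule_work(&host->done);
	return IRQ_HANDLED;
}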
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 02145e9697a9..1196f61a4ab6 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2758,6 +2758,29 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | |||
2758 | 2758 | ||
2759 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); | 2759 | dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); |
2760 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); | 2760 | dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); |
2761 | |||
2762 | /* | ||
2763 | * RICOH 0xe823 SD/MMC card reader fails to recognize | ||
2764 | * certain types of SD/MMC cards. Lowering the SD base | ||
2765 | * clock frequency from 200 MHz to 50 MHz fixes this issue. | ||
2766 | * | ||
2767 | * 0x150 - SD2.0 mode enable for changing base clock | ||
2768 | * frequency to 50 MHz | ||
2769 | * 0xe1 - Base clock frequency | ||
2770 | * 0x32 - 50 MHz new clock frequency | ||
2771 | * 0xf9 - Key register for 0x150 | ||
2772 | * 0xfc - Key register for 0xe1 | ||
2773 | */ | ||
2774 | if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { | ||
2775 | pci_write_config_byte(dev, 0xf9, 0xfc); | ||
2776 | pci_write_config_byte(dev, 0x150, 0x10); | ||
2777 | pci_write_config_byte(dev, 0xf9, 0x00); | ||
2778 | pci_write_config_byte(dev, 0xfc, 0x01); | ||
2779 | pci_write_config_byte(dev, 0xe1, 0x32); | ||
2780 | pci_write_config_byte(dev, 0xfc, 0x00); | ||
2781 | |||
2782 | dev_notice(&dev->dev, "MMC controller base frequency changed to 50 MHz.\n"); | ||
2783 | } | ||
2761 | } | 2784 | } |
2762 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2785 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
2763 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2786 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |