path: root/drivers/mmc
author     Andrei Warkentin <andreiw@motorola.com>    2011-05-23 16:06:36 -0400
committer  Chris Ball <cjb@laptop.org>                2011-05-25 16:48:46 -0400
commit     d0c97cfb81ebc5b416c0f92fa2fc18d2773e3023 (patch)
tree       dbf0fa49bdad896d283a7f392c156483d9687d4b /drivers/mmc
parent     c59de9287993b5c36f9005f745a3ce0b1008131d (diff)
mmc: core: Use CMD23 for multiblock transfers when we can.
CMD23-prefixed instead of open-ended multiblock transfers
have a performance advantage on some MMC cards.

Signed-off-by: Andrei Warkentin <andreiw@motorola.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c | 108
1 file changed, 79 insertions(+), 29 deletions(-)
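For context: an open-ended multiblock transfer issues the data command (CMD18/CMD25) and relies on STOP_TRANSMISSION (CMD12) to end it, while a pre-defined transfer announces the length first with SET_BLOCK_COUNT (CMD23), so the card knows up front how many blocks follow. A rough sketch of a CMD23-bounded write using the core structures this patch touches; nr_blocks, start_sector and the exact flag choices are illustrative assumptions, not lifted from the patch:

	/* Hypothetical sketch of a CMD23-bounded multiblock write;
	 * nr_blocks and start_sector are stand-in variables. */
	struct mmc_request mrq = {0};
	struct mmc_command sbc = {0}, cmd = {0}, stop = {0};
	struct mmc_data data = {0};

	sbc.opcode = MMC_SET_BLOCK_COUNT;	/* CMD23 */
	sbc.arg    = nr_blocks;			/* bit 31 would request a reliable write */
	sbc.flags  = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;	/* CMD25 */
	cmd.arg    = start_sector;
	cmd.flags  = MMC_RSP_R1 | MMC_CMD_ADTC;

	stop.opcode = MMC_STOP_TRANSMISSION;	/* CMD12, error paths only */
	stop.flags  = MMC_RSP_R1B | MMC_CMD_AC;

	mrq.sbc  = &sbc;	/* a CMD23-capable host sends this first */
	mrq.cmd  = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;	/* still filled in; see the patch comment below */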
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 126c7f41c5a3..a380accaba9a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,10 +59,6 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM1 0x81
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 
-#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
-	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
-	 ((card)->ext_csd.rel_sectors)))
-
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -90,6 +86,10 @@ struct mmc_blk_data {
 	struct mmc_queue queue;
 	struct list_head part;
 
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+
 	unsigned int	usage;
 	unsigned int	read_only;
 	unsigned int	part_type;
@@ -429,6 +429,7 @@ static const struct block_device_operations mmc_bdops = {
 
 struct mmc_blk_request {
 	struct mmc_request	mrq;
+	struct mmc_command	sbc;
 	struct mmc_command	cmd;
 	struct mmc_command	stop;
 	struct mmc_data		data;
@@ -652,13 +653,10 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
  * reliable write can handle, thus finish the request in
  * partial completions.
  */
-static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 				    struct mmc_card *card,
 				    struct request *req)
 {
-	int err;
-	struct mmc_command set_count = {0};
-
 	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
 		/* Legacy mode imposes restrictions on transfers. */
 		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
@@ -669,15 +667,6 @@ static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	else if (brq->data.blocks < card->ext_csd.rel_sectors)
 		brq->data.blocks = 1;
 	}
-
-	set_count.opcode = MMC_SET_BLOCK_COUNT;
-	set_count.arg = brq->data.blocks | (1 << 31);
-	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &set_count, 0);
-	if (err)
-		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
-		       req->rq_disk->disk_name, err);
-	return err;
 }
 
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
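To make the legacy-mode clamping concrete, here is a small user-space sketch of the two branches visible above (the context elided between the hunks is not reproduced); REL_SECTORS == 4 is an assumed example value of card->ext_csd.rel_sectors:

	/* Sketch of the visible legacy-mode clamping rules. */
	#include <assert.h>

	#define REL_SECTORS 4U	/* assumed card->ext_csd.rel_sectors */

	static unsigned int rel_wr_blocks(unsigned int start, unsigned int blocks)
	{
		if (start % REL_SECTORS)	/* !IS_ALIGNED(start, rel_sectors) */
			return 1;		/* unaligned: single-block writes */
		if (blocks < REL_SECTORS)	/* the "else if" branch above */
			return 1;		/* too short for a reliable unit */
		return blocks;			/* aligned and long enough; elided
						 * context may trim this further */
	}

	int main(void)
	{
		assert(rel_wr_blocks(13, 8) == 1);	/* unaligned start sector */
		assert(rel_wr_blocks(16, 2) == 1);	/* aligned but short */
		assert(rel_wr_blocks(16, 4) == 4);	/* full reliable unit */
		return 0;
	}

Because mmc_apply_rel_rw() now returns void, clamping can no longer fail the request; a shortened transfer simply completes in partial chunks, as the comment above the function notes.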
@@ -694,7 +683,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
 			  (req->cmd_flags & REQ_META)) &&
 			 (rq_data_dir(req) == WRITE) &&
-			 REL_WRITES_SUPPORTED(card);
+			 (md->flags & MMC_BLK_REL_WR);
 
 	do {
 		struct mmc_command cmd = {0};
@@ -732,11 +721,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
 		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request. Reliable
-			 * writes use SET_BLOCK_COUNT and do not use a
-			 * STOP_TRANSMISSION request either.
+			 * token, not a STOP_TRANSMISSION request.
 			 */
-			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+			if (!mmc_host_is_spi(card->host) ||
 			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
@@ -754,8 +741,37 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
-		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
-			goto cmd_err;
+		if (do_rel_wr)
+			mmc_apply_rel_rw(&brq, card, req);
+
+		/*
+		 * Pre-defined multi-block transfers are preferable to
+		 * open-ended ones (and necessary for reliable writes).
+		 * However, it is not sufficient to just send CMD23,
+		 * and avoid the final CMD12, as on an error condition
+		 * CMD12 (stop) needs to be sent anyway. This, coupled
+		 * with Auto-CMD23 enhancements provided by some
+		 * hosts, means that the complexity of dealing
+		 * with this is best left to the host. If CMD23 is
+		 * supported by card and host, we'll fill sbc in and let
+		 * the host deal with handling it correctly. This means
+		 * that for hosts that don't expose MMC_CAP_CMD23, no
+		 * change of behavior will be observed.
+		 *
+		 * N.B: Some MMC cards experience perf degradation.
+		 * We'll avoid using CMD23-bounded multiblock writes for
+		 * these, while retaining features like reliable writes.
+		 */
+
+		if ((md->flags & MMC_BLK_CMD23) &&
+		    mmc_op_multi(brq.cmd.opcode) &&
+		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq.sbc.arg = brq.data.blocks |
+				(do_rel_wr ? (1 << 31) : 0);
+			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq.mrq.sbc = &brq.sbc;
+		}
 
 		mmc_set_data_timeout(&brq.data, card);
 
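The block driver only fills in brq.mrq.sbc; actually sending CMD23 (or folding it into a controller's Auto-CMD23 engine) is the host driver's job. A simplified, hypothetical request handler for a host that advertises MMC_CAP_CMD23 might look like the following; example_send_command() and example_start_data() are stand-ins, not a real API:

	/* Hypothetical host-driver sketch, not from this patch. */
	static void example_host_request(struct mmc_host *mmc,
					 struct mmc_request *mrq)
	{
		if (mrq->sbc)
			example_send_command(mmc, mrq->sbc);	/* CMD23 first */

		example_send_command(mmc, mrq->cmd);		/* CMD17/18/24/25 */
		if (mrq->data)
			example_start_data(mmc, mrq->data);

		/*
		 * mrq->stop stays available: a CMD23-bounded transfer needs
		 * no CMD12 on success, but the host must still send it to
		 * recover from errors, which is why the block layer keeps
		 * filling it in.
		 */
	}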
@@ -792,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
-		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+		if (brq.sbc.error || brq.cmd.error ||
+		    brq.data.error || brq.stop.error) {
 			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
 				/* Redo read one sector at a time */
 				printk(KERN_WARNING "%s: retrying using single "
@@ -803,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			status = get_card_status(card, req);
 		}
 
+		if (brq.sbc.error) {
+			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.sbc.error,
+			       brq.sbc.resp[0], status);
+		}
+
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write "
 			       "command, response %#x, card status %#x\n",
@@ -1014,8 +1038,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
-	if (REL_WRITES_SUPPORTED(card))
-		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1034,6 +1056,19 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 
 	blk_queue_logical_block_size(md->queue.queue, 512);
 	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host) &&
+	    mmc_card_mmc(card))
+		md->flags |= MMC_BLK_CMD23;
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+	}
+
 	return md;
 
 err_putdisk:
@@ -1189,6 +1224,21 @@ static const struct mmc_fixup blk_fixups[] =
 	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
 	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
 	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * blacklist what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
 	END_FIXUP
 };
 
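The blacklist only matters because the probe path runs each card through this table. A simplified sketch of how such fixup matching is assumed to work, modeled on the core's mmc_fixup_device(); revision-range checks and the exact struct layout are omitted:

	/* Simplified sketch; field names follow the arguments of
	 * MMC_FIXUP(name, manfid, oemid, fixup, data) above. */
	static void example_fixup_device(struct mmc_card *card,
					 const struct mmc_fixup *table)
	{
		const struct mmc_fixup *f;

		for (f = table; f->vendor_fixup; f++) {
			/* CID_*_ANY wildcards match every card. */
			if ((f->manfid == CID_MANFID_ANY ||
			     f->manfid == card->cid.manfid) &&
			    (f->oemid == CID_OEMID_ANY ||
			     f->oemid == card->cid.oemid) &&
			    !strncmp(f->name, card->cid.prod_name,
				     sizeof(card->cid.prod_name)))
				f->vendor_fixup(card, f->data);	/* e.g. add_quirk_mmc */
		}
	}

add_quirk_mmc() is expected to OR the data argument into card->quirks for MMC (not SD) cards, which is what makes the MMC_QUIRK_BLK_NO_CMD23 test in mmc_blk_issue_rw_rq() fire for the Toshiba parts listed above.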