author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 17:16:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-28 17:16:11 -0400
commit     46b51ea2099fa2082342e52b8284aa828429b80b (patch)
tree       0a0d7bfe1aff036c86a2e7beacbd91398008bfb6 /drivers/mmc/card
parent     1fdb24e969110fafea36d3b393bea438f702c87f (diff)
parent     a6029e1f75bb484c1f5bc68b6a8572e4024795bc (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (83 commits)
  mmc: fix compile error when CONFIG_BLOCK is not enabled
  mmc: core: Cleanup eMMC4.5 conditionals
  mmc: omap_hsmmc: if multiblock reads are broken, disable them
  mmc: core: add workaround for controllers with broken multiblock reads
  mmc: core: Prevent too long response times for suspend
  mmc: recognise SDIO cards with SDIO_CCCR_REV 3.00
  mmc: sd: Handle SD3.0 cards not supporting UHS-I bus speed mode
  mmc: core: support HPI send command
  mmc: core: Add cache control for eMMC4.5 device
  mmc: core: Modify the timeout value for writing power class
  mmc: core: new discard feature support at eMMC v4.5
  mmc: core: mmc sanitize feature support for v4.5
  mmc: dw_mmc: modify DATA register offset
  mmc: sdhci-pci: add flag for devices that can support runtime PM
  mmc: omap_hsmmc: ensure pbias configuration is always done
  mmc: core: Add Power Off Notify Feature eMMC 4.5
  mmc: sdhci-s3c: fix potential NULL dereference
  mmc: replace printk with appropriate display macro
  mmc: core: Add default timeout value for CMD6
  mmc: sdhci-pci: add runtime pm support
  ...
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c     | 310
-rw-r--r--  drivers/mmc/card/mmc_test.c  |  65
-rw-r--r--  drivers/mmc/card/queue.c     |   8
-rw-r--r--  drivers/mmc/card/sdio_uart.c |  10
4 files changed, 269 insertions, 124 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4c1a648d00fc..a1cb21f95302 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -94,6 +94,11 @@ struct mmc_blk_data {
 	unsigned int	read_only;
 	unsigned int	part_type;
 	unsigned int	name_idx;
+	unsigned int	reset_done;
+#define MMC_BLK_READ		BIT(0)
+#define MMC_BLK_WRITE		BIT(1)
+#define MMC_BLK_DISCARD		BIT(2)
+#define MMC_BLK_SECDISCARD	BIT(3)
 
 	/*
 	 * Only set in main mmc_blk_data associated
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock);
 enum mmc_blk_status {
 	MMC_BLK_SUCCESS = 0,
 	MMC_BLK_PARTIAL,
-	MMC_BLK_RETRY,
-	MMC_BLK_RETRY_SINGLE,
-	MMC_BLK_DATA_ERR,
 	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
 	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -291,7 +296,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 	struct mmc_card *card;
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
-	struct mmc_request mrq = {0};
+	struct mmc_request mrq = {NULL};
 	struct scatterlist sg;
 	int err;
 
@@ -442,19 +447,24 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 {
 	int ret;
 	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
 	if (main_md->part_curr == md->part_type)
 		return 0;
 
 	if (mmc_card_mmc(card)) {
-		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
-		card->ext_csd.part_config |= md->part_type;
+		u8 part_config = card->ext_csd.part_config;
+
+		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		part_config |= md->part_type;
 
 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
 		if (ret)
 			return ret;
-}
+
+		card->ext_csd.part_config = part_config;
+	}
 
 	main_md->part_curr = md->part_type;
 	return 0;
@@ -466,7 +476,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	u32 result;
 	__be32 *blocks;
 
-	struct mmc_request mrq = {0};
+	struct mmc_request mrq = {NULL};
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
 	unsigned int timeout_us;
@@ -616,7 +626,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-	struct mmc_blk_request *brq)
+	struct mmc_blk_request *brq, int *ecc_err)
 {
 	bool prev_cmd_status_valid = true;
 	u32 status, stop_status = 0;
@@ -641,6 +651,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	if (err)
 		return ERR_ABORT;
 
+	/* Flag ECC errors */
+	if ((status & R1_CARD_ECC_FAILED) ||
+	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+		*ecc_err = 1;
+
 	/*
 	 * Check the current card state. If it is in some data transfer
 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -658,6 +674,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		 */
 		if (err)
 			return ERR_ABORT;
+		if (stop_status & R1_CARD_ECC_FAILED)
+			*ecc_err = 1;
 	}
 
 	/* Check for set block count errors */
@@ -670,6 +688,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
 				prev_cmd_status_valid, status);
 
+	/* Data errors */
+	if (!brq->stop.error)
+		return ERR_CONTINUE;
+
 	/* Now for stop errors. These aren't fatal to the transfer. */
 	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
 	       req->rq_disk->disk_name, brq->stop.error,
@@ -686,12 +708,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	return ERR_CONTINUE;
 }
 
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+			 int type)
+{
+	int err;
+
+	if (md->reset_done & type)
+		return -EEXIST;
+
+	md->reset_done |= type;
+	err = mmc_hw_reset(host);
+	/* Ensure we switch back to the correct partition */
+	if (err != -EOPNOTSUPP) {
+		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+		int part_err;
+
+		main_md->part_curr = main_md->part_type;
+		part_err = mmc_blk_part_switch(host->card, md);
+		if (part_err) {
+			/*
+			 * We have failed to get back into the correct
+			 * partition, so we need to abort the whole request.
+			 */
+			return -ENODEV;
+		}
+	}
+	return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+	md->reset_done &= ~type;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_DISCARD;
 
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
@@ -701,11 +756,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
-	if (mmc_can_trim(card))
+	if (mmc_can_discard(card))
+		arg = MMC_DISCARD_ARG;
+	else if (mmc_can_trim(card))
 		arg = MMC_TRIM_ARG;
 	else
 		arg = MMC_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -718,6 +775,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	}
 	err = mmc_erase(card, from, nr, arg);
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -731,13 +792,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_SECDISCARD;
 
-	if (!mmc_can_secure_erase_trim(card)) {
+	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
+	/* The sanitize operation is supported at v4.5 only */
+	if (mmc_can_sanitize(card)) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_SANITIZE_START, 1, 0);
+		goto out;
+	}
+
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
@@ -745,7 +813,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		arg = MMC_SECURE_TRIM1_ARG;
 	else
 		arg = MMC_SECURE_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -769,6 +837,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
 	}
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -779,16 +851,18 @@ out:
 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	int ret = 0;
+
+	ret = mmc_flush_cache(card);
+	if (ret)
+		ret = -EIO;
 
-	/*
-	 * No-op, only service this because we need REQ_FUA for reliable
-	 * writes.
-	 */
 	spin_lock_irq(&md->lock);
-	__blk_end_request_all(req, 0);
+	__blk_end_request_all(req, ret);
 	spin_unlock_irq(&md->lock);
 
-	return 1;
+	return ret ? 0 : 1;
 }
 
 /*
@@ -825,11 +899,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 static int mmc_blk_err_check(struct mmc_card *card,
 			     struct mmc_async_req *areq)
 {
-	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 						    mmc_active);
 	struct mmc_blk_request *brq = &mq_mrq->brq;
 	struct request *req = mq_mrq->req;
+	int ecc_err = 0;
 
 	/*
 	 * sbc.error indicates a problem with the set block count
@@ -841,8 +915,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	 * stop.error indicates a problem with the stop command. Data
 	 * may have been transferred, or may still be transferring.
 	 */
-	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
-		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+	    brq->data.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
 		case ERR_RETRY:
 			return MMC_BLK_RETRY;
 		case ERR_ABORT:
@@ -873,7 +948,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
 		do {
 			int err = get_card_status(card, &status, 5);
 			if (err) {
-				printk(KERN_ERR "%s: error %d requesting status\n",
+				pr_err("%s: error %d requesting status\n",
 				       req->rq_disk->disk_name, err);
 				return MMC_BLK_CMD_ERR;
 			}
@@ -894,23 +969,21 @@ static int mmc_blk_err_check(struct mmc_card *card,
 		       brq->cmd.resp[0], brq->stop.resp[0]);
 
 		if (rq_data_dir(req) == READ) {
-			if (brq->data.blocks > 1) {
-				/* Redo read one sector at a time */
-				pr_warning("%s: retrying using single block read\n",
-					   req->rq_disk->disk_name);
-				return MMC_BLK_RETRY_SINGLE;
-			}
+			if (ecc_err)
+				return MMC_BLK_ECC_ERR;
 			return MMC_BLK_DATA_ERR;
 		} else {
 			return MMC_BLK_CMD_ERR;
 		}
 	}
 
-	if (ret == MMC_BLK_SUCCESS &&
-	    blk_rq_bytes(req) != brq->data.bytes_xfered)
-		ret = MMC_BLK_PARTIAL;
+	if (!brq->data.bytes_xfered)
+		return MMC_BLK_RETRY;
 
-	return ret;
+	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+		return MMC_BLK_PARTIAL;
+
+	return MMC_BLK_SUCCESS;
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -957,13 +1030,20 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	if (brq->data.blocks > card->host->max_blk_count)
 		brq->data.blocks = card->host->max_blk_count;
 
-	/*
-	 * After a read error, we redo the request one sector at a time
-	 * in order to accurately determine which sectors can be read
-	 * successfully.
-	 */
-	if (disable_multi && brq->data.blocks > 1)
-		brq->data.blocks = 1;
+	if (brq->data.blocks > 1) {
+		/*
+		 * After a read error, we redo the request one sector
+		 * at a time in order to accurately determine which
+		 * sectors can be read successfully.
+		 */
+		if (disable_multi)
+			brq->data.blocks = 1;
+
+		/* Some controllers can't do multiblock reads due to hw bugs */
+		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+		    rq_data_dir(req) == READ)
+			brq->data.blocks = 1;
+	}
 
 	if (brq->data.blocks > 1 || do_rel_wr) {
 		/* SPI multiblock writes terminate using a special
@@ -1049,12 +1129,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+			   struct mmc_blk_request *brq, struct request *req,
+			   int ret)
+{
+	/*
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * as reported by the controller (which might be less than
+	 * the real number of written sectors, but never more).
+	 */
+	if (mmc_card_sd(card)) {
+		u32 blocks;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0, blocks << 9);
+			spin_unlock_irq(&md->lock);
+		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
+	}
+	return ret;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
-	int ret = 1, disable_multi = 0, retry = 0;
+	int ret = 1, disable_multi = 0, retry = 0, type;
 	enum mmc_blk_status status;
 	struct mmc_queue_req *mq_rq;
 	struct request *req;
@@ -1076,6 +1185,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
 		req = mq_rq->req;
+		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
 		switch (status) {
@@ -1084,18 +1194,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			/*
 			 * A block was successfully transferred.
 			 */
+			mmc_blk_reset_success(md, type);
 			spin_lock_irq(&md->lock);
 			ret = __blk_end_request(req, 0,
 						brq->data.bytes_xfered);
 			spin_unlock_irq(&md->lock);
+			/*
+			 * If the blk_end_request function returns non-zero even
+			 * though all data has been transferred and no errors
+			 * were returned by the host controller, it's a bug.
+			 */
 			if (status == MMC_BLK_SUCCESS && ret) {
-				/*
-				 * The blk_end_request has returned non zero
-				 * even though all data is transfered and no
-				 * erros returned by host.
-				 * If this happen it's a bug.
-				 */
-				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+				pr_err("%s BUG rq_tot %d d_xfer %d\n",
 					__func__, blk_rq_bytes(req),
 					brq->data.bytes_xfered);
 				rqc = NULL;
@@ -1103,16 +1213,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			}
 			break;
 		case MMC_BLK_CMD_ERR:
-			goto cmd_err;
-		case MMC_BLK_RETRY_SINGLE:
-			disable_multi = 1;
-			break;
+			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
 				break;
+			/* Fall through */
 		case MMC_BLK_ABORT:
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
 			goto cmd_abort;
-		case MMC_BLK_DATA_ERR:
+		case MMC_BLK_DATA_ERR: {
+			int err;
+
+			err = mmc_blk_reset(md, card->host, type);
+			if (!err)
+				break;
+			if (err == -ENODEV)
+				goto cmd_abort;
+			/* Fall through */
+		}
+		case MMC_BLK_ECC_ERR:
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				disable_multi = 1;
+				break;
+			}
 			/*
 			 * After an error, we redo I/O one sector at a
 			 * time, so we only reach here after trying to
@@ -1129,7 +1259,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 		if (ret) {
 			/*
-			 * In case of a none complete request
+			 * In case of a incomplete request
 			 * prepare it again and resend.
 			 */
 			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
@@ -1139,30 +1269,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 	return 1;
 
- cmd_err:
-	/*
-	 * If this is an SD card and we're writing, we can first
-	 * mark the known good sectors as ok.
-	 *
-	 * If the card is not SD, we can still ok written sectors
-	 * as reported by the controller (which might be less than
-	 * the real number of written sectors, but never more).
-	 */
-	if (mmc_card_sd(card)) {
-		u32 blocks;
-
-		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
-			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, blocks << 9);
-			spin_unlock_irq(&md->lock);
-		}
-	} else {
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
-	}
-
  cmd_abort:
 	spin_lock_irq(&md->lock);
 	while (ret)
@@ -1190,6 +1296,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
+		if (req) {
+			spin_lock_irq(&md->lock);
+			__blk_end_request_all(req, -EIO);
+			spin_unlock_irq(&md->lock);
+		}
 		ret = 0;
 		goto out;
 	}
@@ -1374,32 +1485,35 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
 
 	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
-	printk(KERN_INFO "%s: %s %s partition %u %s\n",
+	pr_info("%s: %s %s partition %u %s\n",
 	       part_md->disk->disk_name, mmc_card_id(card),
 	       mmc_card_name(card), part_md->part_type, cap_str);
 	return 0;
 }
 
+/* MMC Physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocatedi
+ * to provide access to the partition.
+ */
+
 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
 {
-	int ret = 0;
+	int idx, ret = 0;
 
 	if (!mmc_card_mmc(card))
 		return 0;
 
-	if (card->ext_csd.boot_size) {
-		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
-					 card->ext_csd.boot_size >> 9,
-					 true,
-					 "boot0");
-		if (ret)
-			return ret;
-		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
-					 card->ext_csd.boot_size >> 9,
-					 true,
-					 "boot1");
-		if (ret)
-			return ret;
+	for (idx = 0; idx < card->nr_parts; idx++) {
+		if (card->part[idx].size) {
+			ret = mmc_blk_alloc_part(card, md,
+				card->part[idx].part_cfg,
+				card->part[idx].size >> 9,
+				card->part[idx].force_ro,
+				card->part[idx].name);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return ret;
@@ -1415,7 +1529,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 	mmc_release_host(card->host);
 
 	if (err) {
-		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+		pr_err("%s: unable to set block size to 512: %d\n",
 			md->disk->disk_name, err);
 		return -EINVAL;
 	}
@@ -1517,7 +1631,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
-	printk(KERN_INFO "%s: %s %s %s %s\n",
+	pr_info("%s: %s %s %s %s\n",
 		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
 		cap_str, md->read_only ? "(ro)" : "");
 
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 2bf229acd3b8..b038c4a9468b 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -22,6 +22,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
+#include <linux/module.h>
 
 #define RESULT_OK		0
 #define RESULT_FAIL		1
@@ -250,7 +251,7 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 		if (!busy && mmc_test_busy(&cmd)) {
 			busy = 1;
 			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
-				printk(KERN_INFO "%s: Warning: Host did not "
+				pr_info("%s: Warning: Host did not "
 					"wait for busy state to end.\n",
 					mmc_hostname(test->card->host));
 		}
@@ -552,7 +553,7 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
 	rate = mmc_test_rate(bytes, &ts);
 	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
 
-	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
 			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
 			 mmc_hostname(test->card->host), sectors, sectors >> 1,
 			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
@@ -578,7 +579,7 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
 	rate = mmc_test_rate(tot, &ts);
 	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
 
-	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
 			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
 			 "%u.%02u IOPS, sg_len %d)\n",
 			 mmc_hostname(test->card->host), count, sectors, count,
@@ -1408,7 +1409,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
 
 static int mmc_test_no_highmem(struct mmc_test_card *test)
 {
-	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+	pr_info("%s: Highmem not configured - test skipped\n",
 	       mmc_hostname(test->card->host));
 	return 0;
 }
@@ -1435,7 +1436,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
 				      t->max_seg_sz, &t->sg_len, min_sg_len);
 	}
 	if (err)
-		printk(KERN_INFO "%s: Failed to map sg list\n",
+		pr_info("%s: Failed to map sg list\n",
 		       mmc_hostname(test->card->host));
 	return err;
 }
@@ -2135,7 +2136,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test,
 
 	return ret;
  err:
-	printk(KERN_INFO "[%s] error\n", __func__);
+	pr_info("[%s] error\n", __func__);
 	return ret;
 }
 
@@ -2149,7 +2150,7 @@ static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
 
 	if (rw->do_nonblock_req &&
 	    ((!pre_req && post_req) || (pre_req && !post_req))) {
-		printk(KERN_INFO "error: only one of pre/post is defined\n");
+		pr_info("error: only one of pre/post is defined\n");
 		return -EINVAL;
 	}
 
@@ -2328,6 +2329,31 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
 	return mmc_test_rw_multiple_sg_len(test, &test_data);
 }
 
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_hw_reset(struct mmc_test_card *test)
+{
+	struct mmc_card *card = test->card;
+	struct mmc_host *host = card->host;
+	int err;
+
+	err = mmc_hw_reset_check(host);
+	if (!err)
+		return RESULT_OK;
+
+	if (err == -ENOSYS)
+		return RESULT_FAIL;
+
+	if (err != -EOPNOTSUPP)
+		return err;
+
+	if (!mmc_can_reset(card))
+		return RESULT_UNSUP_CARD;
+
+	return RESULT_UNSUP_HOST;
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
 	{
 		.name = "Basic write (no data verification)",
@@ -2650,6 +2676,11 @@ static const struct mmc_test_case mmc_test_cases[] = {
 		.run = mmc_test_profile_sglen_r_nonblock_perf,
 		.cleanup = mmc_test_area_cleanup,
 	},
+
+	{
+		.name = "eMMC hardware reset",
+		.run = mmc_test_hw_reset,
+	},
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
@@ -2660,7 +2691,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
 {
 	int i, ret;
 
-	printk(KERN_INFO "%s: Starting tests of card %s...\n",
+	pr_info("%s: Starting tests of card %s...\n",
 		mmc_hostname(test->card->host), mmc_card_id(test->card));
 
 	mmc_claim_host(test->card->host);
@@ -2671,14 +2702,14 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
 		if (testcase && ((i + 1) != testcase))
 			continue;
 
-		printk(KERN_INFO "%s: Test case %d. %s...\n",
+		pr_info("%s: Test case %d. %s...\n",
 			mmc_hostname(test->card->host), i + 1,
 			mmc_test_cases[i].name);
 
 		if (mmc_test_cases[i].prepare) {
 			ret = mmc_test_cases[i].prepare(test);
 			if (ret) {
-				printk(KERN_INFO "%s: Result: Prepare "
+				pr_info("%s: Result: Prepare "
 					"stage failed! (%d)\n",
 					mmc_hostname(test->card->host),
 					ret);
@@ -2708,25 +2739,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
 		ret = mmc_test_cases[i].run(test);
 		switch (ret) {
 		case RESULT_OK:
-			printk(KERN_INFO "%s: Result: OK\n",
+			pr_info("%s: Result: OK\n",
 				mmc_hostname(test->card->host));
 			break;
 		case RESULT_FAIL:
-			printk(KERN_INFO "%s: Result: FAILED\n",
+			pr_info("%s: Result: FAILED\n",
 				mmc_hostname(test->card->host));
 			break;
 		case RESULT_UNSUP_HOST:
-			printk(KERN_INFO "%s: Result: UNSUPPORTED "
+			pr_info("%s: Result: UNSUPPORTED "
 				"(by host)\n",
 				mmc_hostname(test->card->host));
 			break;
 		case RESULT_UNSUP_CARD:
-			printk(KERN_INFO "%s: Result: UNSUPPORTED "
+			pr_info("%s: Result: UNSUPPORTED "
 				"(by card)\n",
 				mmc_hostname(test->card->host));
 			break;
 		default:
-			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
+			pr_info("%s: Result: ERROR (%d)\n",
 				mmc_hostname(test->card->host), ret);
 		}
 
@@ -2737,7 +2768,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
 		if (mmc_test_cases[i].cleanup) {
 			ret = mmc_test_cases[i].cleanup(test);
 			if (ret) {
-				printk(KERN_INFO "%s: Warning: Cleanup "
+				pr_info("%s: Warning: Cleanup "
 					"stage failed! (%d)\n",
 					mmc_hostname(test->card->host),
 					ret);
@@ -2747,7 +2778,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
 
 	mmc_release_host(test->card->host);
 
-	printk(KERN_INFO "%s: Tests completed.\n",
+	pr_info("%s: Tests completed.\n",
 		mmc_hostname(test->card->host));
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 45fb362e3f01..dcad59cbfef1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -108,7 +108,7 @@ static void mmc_request(struct request_queue *q)
 	wake_up_process(mq->thread);
 }
 
-struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
 {
 	struct scatterlist *sg;
 
@@ -140,7 +140,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
 		q->limits.discard_granularity = 0;
-	if (mmc_can_secure_erase_trim(card))
+	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
 		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
 }
 
@@ -197,13 +197,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		if (bouncesz > 512) {
 			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
 			if (!mqrq_cur->bounce_buf) {
-				printk(KERN_WARNING "%s: unable to "
+				pr_warning("%s: unable to "
 					"allocate bounce cur buffer\n",
 					mmc_card_name(card));
 			}
 			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
 			if (!mqrq_prev->bounce_buf) {
-				printk(KERN_WARNING "%s: unable to "
+				pr_warning("%s: unable to "
 					"allocate bounce prev buffer\n",
 					mmc_card_name(card));
 				kfree(mqrq_cur->bounce_buf);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index c8c9edb3d7cb..2c151e18c9e8 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -1082,7 +1082,7 @@ static int sdio_uart_probe(struct sdio_func *func,
 		return -ENOMEM;
 
 	if (func->class == SDIO_CLASS_UART) {
-		printk(KERN_WARNING "%s: need info on UART class basic setup\n",
+		pr_warning("%s: need info on UART class basic setup\n",
 			sdio_func_id(func));
 		kfree(port);
 		return -ENOSYS;
@@ -1101,23 +1101,23 @@ static int sdio_uart_probe(struct sdio_func *func,
 				break;
 		}
 		if (!tpl) {
-			printk(KERN_WARNING
+			pr_warning(
 			       "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
 			       sdio_func_id(func));
 			kfree(port);
 			return -EINVAL;
 		}
-		printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+		pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
 			sdio_func_id(func), tpl->data[2], tpl->data[3]);
 		port->regs_offset = (tpl->data[4] << 0) |
 				    (tpl->data[5] << 8) |
 				    (tpl->data[6] << 16);
-		printk(KERN_DEBUG "%s: regs offset = 0x%x\n",
+		pr_debug("%s: regs offset = 0x%x\n",
 			sdio_func_id(func), port->regs_offset);
 		port->uartclk = tpl->data[7] * 115200;
 		if (port->uartclk == 0)
 			port->uartclk = 115200;
-		printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n",
+		pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
 			sdio_func_id(func), port->uartclk,
 			tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
 	} else {