Diffstat (limited to 'drivers/mmc')
 drivers/mmc/host/sh_mmcif.c | 94 +++++++++++++++++----------------
 1 file changed, 50 insertions(+), 44 deletions(-)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9371f3a4939b..4a2c5b2355f2 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -208,7 +208,6 @@ enum mmcif_wait_for {
 
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
-	struct mmc_data *data;
 	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct sh_dmae_slave dma_slave_tx;
@@ -253,19 +252,21 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
 static void mmcif_dma_complete(void *arg)
 {
 	struct sh_mmcif_host *host = arg;
+	struct mmc_data *data = host->mrq->data;
+
 	dev_dbg(&host->pd->dev, "Command completed\n");
 
-	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+	if (WARN(!data, "%s: NULL data in DMA completion!\n",
 		 dev_name(&host->pd->dev)))
 		return;
 
-	if (host->data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ)
 		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_FROM_DEVICE);
 	else
 		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_TO_DEVICE);
 
 	complete(&host->dma_complete);
@@ -273,13 +274,14 @@ static void mmcif_dma_complete(void *arg)
 
 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_FROM_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -295,7 +297,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -316,18 +318,19 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	}
 
 	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, cookie, host->data->sg_len);
+		desc, cookie, data->sg_len);
 }
 
 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_TO_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -343,7 +346,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -711,8 +714,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
 }
 
 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
-			    struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+			    struct mmc_request *mrq)
 {
+	struct mmc_data *data = mrq->data;
+	struct mmc_command *cmd = mrq->cmd;
+	u32 opc = cmd->opcode;
 	u32 tmp = 0;
 
 	/* Response Type check */
@@ -744,7 +750,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 	/* WDAT / DATW */
-	if (host->data) {
+	if (data) {
 		tmp |= CMD_SET_WDAT;
 		switch (host->bus_width) {
 		case MMC_BUS_WIDTH_1:
@@ -768,7 +774,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
 		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
 		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
-				mrq->data->blocks << 16);
+				data->blocks << 16);
 	}
 	/* RIDXC[1:0] check bits */
 	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
@@ -782,7 +788,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
 		tmp |= CMD_SET_CRC7C_INTERNAL;
 
-	return opc = ((opc << 24) | tmp);
+	return (opc << 24) | tmp;
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
@@ -830,12 +836,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 
-	if (host->data) {
+	if (mrq->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
 				mrq->data->blksz);
 	}
-	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+	opc = sh_mmcif_set_cmd(host, mrq);
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
@@ -851,15 +857,16 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
-	struct mmc_command *cmd = mrq->stop;
-
-	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+	switch (mrq->cmd->opcode) {
+	case MMC_READ_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
-	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+		break;
+	case MMC_WRITE_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
-	else {
+		break;
+	default:
 		dev_err(&host->pd->dev, "unsupported stop cmd\n");
-		cmd->error = sh_mmcif_error_manage(host);
+		mrq->stop->error = sh_mmcif_error_manage(host);
 		return;
 	}
 
@@ -905,7 +912,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	host->mrq = mrq;
-	host->data = mrq->data;
 
 	sh_mmcif_start_cmd(host, mrq);
 }
@@ -985,6 +991,7 @@ static struct mmc_host_ops sh_mmcif_ops = {
 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 {
 	struct mmc_command *cmd = host->mrq->cmd;
+	struct mmc_data *data = host->mrq->data;
 	long time;
 
 	if (host->sd_error) {
@@ -1010,10 +1017,10 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 
 	sh_mmcif_get_response(host, cmd);
 
-	if (!host->data)
+	if (!data)
 		return false;
 
-	if (host->mrq->data->flags & MMC_DATA_READ) {
+	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
 			sh_mmcif_start_dma_rx(host);
 	} else {
@@ -1022,8 +1029,8 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 	}
 
 	if (!host->dma_active) {
-		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
-		if (!host->data->error)
+		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!data->error)
 			return true;
 		return false;
 	}
@@ -1035,22 +1042,22 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 		dev_err(host->mmc->parent,
 			"Error IRQ while waiting for DMA completion!\n");
 		/* Woken up by an error IRQ: abort DMA */
-		if (host->data->flags & MMC_DATA_READ)
+		if (data->flags & MMC_DATA_READ)
 			dmaengine_terminate_all(host->chan_rx);
 		else
 			dmaengine_terminate_all(host->chan_tx);
-		host->data->error = sh_mmcif_error_manage(host);
+		data->error = sh_mmcif_error_manage(host);
 	} else if (!time) {
-		host->data->error = -ETIMEDOUT;
+		data->error = -ETIMEDOUT;
 	} else if (time < 0) {
-		host->data->error = time;
+		data->error = time;
 	}
 	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
 			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
 	host->dma_active = false;
 
-	if (host->data->error)
-		host->data->bytes_xfered = 0;
+	if (data->error)
+		data->bytes_xfered = 0;
 
 	return false;
 }
@@ -1059,6 +1066,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
 	struct mmc_request *mrq = host->mrq;
+	struct mmc_data *data = mrq->data;
 
 	cancel_delayed_work_sync(&host->timeout_work);
 
@@ -1106,20 +1114,18 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
 		if (host->sd_error)
-			mrq->data->error = sh_mmcif_error_manage(host);
+			data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
 	}
 
 	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
-		host->data = NULL;
+		if (!mrq->cmd->error && data && !data->error)
+			data->bytes_xfered =
+				data->blocks * data->blksz;
 
-		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-
-		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
 			sh_mmcif_stop_cmd(host, mrq);
 			if (!mrq->stop->error)
 				return IRQ_HANDLED;
@@ -1128,6 +1134,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
 	host->state = STATE_IDLE;
+	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 
 	return IRQ_HANDLED;
@@ -1223,7 +1230,7 @@ static void mmcif_timeout_work(struct work_struct *work)
 	case MMCIF_WAIT_FOR_WRITE:
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
-		host->data->error = sh_mmcif_error_manage(host);
+		mrq->data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
@@ -1231,7 +1238,6 @@ static void mmcif_timeout_work(struct work_struct *work)
 
 	host->state = STATE_IDLE;
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
-	host->data = NULL;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 }