Diffstat (limited to 'drivers/mmc/core/mmc_ops.c')
 drivers/mmc/core/mmc_ops.c | 130 +++++++++++++++++++++++++++++++++----------
 1 file changed, 104 insertions(+), 26 deletions(-)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 7911e0510a1d..3b044c5b029c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -264,20 +264,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
 	struct scatterlist sg;
-	void *data_buf;
-	int is_on_stack;
-
-	is_on_stack = object_is_on_stack(buf);
-	if (is_on_stack) {
-		/*
-		 * dma onto stack is unsafe/nonportable, but callers to this
-		 * routine normally provide temporary on-stack buffers ...
-		 */
-		data_buf = kmalloc(len, GFP_KERNEL);
-		if (!data_buf)
-			return -ENOMEM;
-	} else
-		data_buf = buf;
 
 	mrq.cmd = &cmd;
 	mrq.data = &data;
@@ -298,7 +284,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	sg_init_one(&sg, data_buf, len);
+	sg_init_one(&sg, buf, len);
 
 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
 		/*
@@ -312,11 +298,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 
 	mmc_wait_for_req(host, &mrq);
 
-	if (is_on_stack) {
-		memcpy(buf, data_buf, len);
-		kfree(data_buf);
-	}
-
 	if (cmd.error)
 		return cmd.error;
 	if (data.error)
@@ -334,7 +315,7 @@ int mmc_send_csd(struct mmc_card *card, u32 *csd)
 		return mmc_send_cxd_native(card->host, card->rca << 16,
 				csd, MMC_SEND_CSD);
 
-	csd_tmp = kmalloc(16, GFP_KERNEL);
+	csd_tmp = kzalloc(16, GFP_KERNEL);
 	if (!csd_tmp)
 		return -ENOMEM;
 
@@ -362,7 +343,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
 				cid, MMC_SEND_CID);
 	}
 
-	cid_tmp = kmalloc(16, GFP_KERNEL);
+	cid_tmp = kzalloc(16, GFP_KERNEL);
 	if (!cid_tmp)
 		return -ENOMEM;
 
@@ -378,12 +359,35 @@ err:
 	return ret;
 }
 
-int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 {
-	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
-			ext_csd, 512);
+	int err;
+	u8 *ext_csd;
+
+	if (!card || !new_ext_csd)
+		return -EINVAL;
+
+	if (!mmc_can_ext_csd(card))
+		return -EOPNOTSUPP;
+
+	/*
+	 * As the ext_csd is so large and mostly unused, we don't store the
+	 * raw block in mmc_card.
+	 */
+	ext_csd = kzalloc(512, GFP_KERNEL);
+	if (!ext_csd)
+		return -ENOMEM;
+
+	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+				512);
+	if (err)
+		kfree(ext_csd);
+	else
+		*new_ext_csd = ext_csd;
+
+	return err;
 }
-EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
+EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
 
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 {
@@ -543,6 +547,75 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 }
 EXPORT_SYMBOL_GPL(mmc_switch);
 
+int mmc_send_tuning(struct mmc_host *host)
+{
+	struct mmc_request mrq = {NULL};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct scatterlist sg;
+	struct mmc_ios *ios = &host->ios;
+	const u8 *tuning_block_pattern;
+	int size, err = 0;
+	u8 *data_buf;
+	u32 opcode;
+
+	if (ios->bus_width == MMC_BUS_WIDTH_8) {
+		tuning_block_pattern = tuning_blk_pattern_8bit;
+		size = sizeof(tuning_blk_pattern_8bit);
+		opcode = MMC_SEND_TUNING_BLOCK_HS200;
+	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
+		tuning_block_pattern = tuning_blk_pattern_4bit;
+		size = sizeof(tuning_blk_pattern_4bit);
+		opcode = MMC_SEND_TUNING_BLOCK;
+	} else
+		return -EINVAL;
+
+	data_buf = kzalloc(size, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	cmd.opcode = opcode;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.blksz = size;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+
+	/*
+	 * According to the tuning specs, Tuning process
+	 * is normally shorter 40 executions of CMD19,
+	 * and timeout value should be shorter than 150 ms
+	 */
+	data.timeout_ns = 150 * NSEC_PER_MSEC;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	sg_init_one(&sg, data_buf, size);
+
+	mmc_wait_for_req(host, &mrq);
+
+	if (cmd.error) {
+		err = cmd.error;
+		goto out;
+	}
+
+	if (data.error) {
+		err = data.error;
+		goto out;
+	}
+
+	if (memcmp(data_buf, tuning_block_pattern, size))
+		err = -EIO;
+
+out:
+	kfree(data_buf);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mmc_send_tuning);
+
 static int
 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 		  u8 len)
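
Note: mmc_send_tuning() selects the tuning opcode and expected pattern from the
current bus width (MMC_SEND_TUNING_BLOCK_HS200 with the 8-bit pattern, or
MMC_SEND_TUNING_BLOCK with the 4-bit pattern), reads back one tuning block and
returns -EIO if it does not match. The intent is that host drivers call it from
their ->execute_tuning() hook while stepping through sample phases. A sketch
under that assumption (foo_set_sample_phase() and FOO_NUM_PHASES are
hypothetical, driver-specific names, not part of this patch):

	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
	{
		int phase, err;

		for (phase = 0; phase < FOO_NUM_PHASES; phase++) {
			/* select the next candidate sampling point */
			foo_set_sample_phase(mmc_priv(mmc), phase);

			/* send the tuning block and verify the pattern */
			err = mmc_send_tuning(mmc);
			if (!err)
				return 0;	/* this phase samples cleanly */
		}

		return -EIO;	/* no phase produced a clean pattern */
	}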
@@ -675,3 +748,8 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
 
 	return 0;
 }
+
+int mmc_can_ext_csd(struct mmc_card *card)
+{
+	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
+}
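
Note: mmc_can_ext_csd() is the capability check behind the -EOPNOTSUPP path in
mmc_get_ext_csd() above: it is true only for cards whose CSD reports a spec
version newer than CSD_SPEC_VER_3, i.e. cards that can have an EXT_CSD register
at all. A hedged guard sketch for an external caller (the enclosing function
and the setup step are hypothetical):

	/* Older cards have no EXT_CSD register; skip the extra setup. */
	if (!mmc_can_ext_csd(card))
		return 0;

	/* ... otherwise read the EXT_CSD via mmc_get_ext_csd() and act on it ... */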