author    Seungwon Jeon <tgih.jun@samsung.com>   2013-02-06 03:02:46 -0500
committer Chris Ball <cjb@laptop.org>            2013-02-24 14:37:16 -0500
commit    ce39f9d17c14e56ea6772aa84393e6e0cc8499c4 (patch)
tree      7b641a7f89614e3cc3f6ec2f81ee32f64ead4f0d /drivers/mmc/card/queue.c
parent    abd9ac144947d9a604beb763339e2f77ce8bec79 (diff)
mmc: support packed write command for eMMC4.5 devices
This patch supports the packed write command of eMMC 4.5 devices. Several writes can be grouped into one packed command, and the data of all the individual commands can be sent in a single transfer on the bus. Moving a large amount of data in one transfer, rather than many small transfers, is more efficient for the eMMC internally; as a result, packed commands improve write throughput. The following tables show the results of packed write:

Type A:
test     none  | packed
iozone   25.8  | 31
tiotest  27.6  | 31.2
lmdd     31.2  | 35.4

Type B:
test     none  | packed
iozone   44.1  | 51.1
tiotest  47.9  | 52.5
lmdd     51.6  | 59.2

Type C:
test     none  | packed
iozone   19.5  | 32
tiotest  19.9  | 34.5
lmdd     22.8  | 40.7

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Reviewed-by: Maya Erez <merez@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
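For reference, a packed write is described to the device by a header block sent ahead of the grouped data; the first 32-bit word encodes the packed version, the read/write type, and the number of packed entries. Below is a minimal sketch of how the companion block-layer change presumably fills that word. The PACKED_CMD_VER and PACKED_CMD_WR values follow the eMMC 4.5 packed-command definition; the helper name is illustrative:

    /* Sketch: word 0 of the packed write header. Per eMMC 4.5,
     * bits 7:0 carry the version, bits 15:8 the r/w type and
     * bits 23:16 the number of packed entries. */
    #define PACKED_CMD_VER  0x01
    #define PACKED_CMD_WR   0x02

    static void example_packed_hdr_word0(u32 *cmd_hdr, u8 num_entries)
    {
            cmd_hdr[0] = (num_entries << 16) |
                         (PACKED_CMD_WR << 8) |
                         PACKED_CMD_VER;
    }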
Diffstat (limited to 'drivers/mmc/card/queue.c')
 drivers/mmc/card/queue.c | 96 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 93 insertions(+), 3 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5e0971016ac5..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -362,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+        int ret = 0;
+
+
+        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+        if (!mqrq_cur->packed) {
+                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+                        mmc_card_name(card));
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+        if (!mqrq_prev->packed) {
+                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+                        mmc_card_name(card));
+                kfree(mqrq_cur->packed);
+                mqrq_cur->packed = NULL;
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        INIT_LIST_HEAD(&mqrq_cur->packed->list);
+        INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+        return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+        kfree(mqrq_cur->packed);
+        mqrq_cur->packed = NULL;
+        kfree(mqrq_prev->packed);
+        mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
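mmc_packed_init() and mmc_packed_clean() are the allocate/free halves of the feature; nothing in this file calls them, so the companion block.c change presumably does so when it sets up and tears down the block device. A hypothetical call site, with the eligibility check purely illustrative:

    /* Sketch: pair packed allocation with queue setup/teardown.
     * The real policy for when packed commands are usable lives
     * in the companion block.c change. */
    static int example_setup(struct mmc_queue *mq, struct mmc_card *card)
    {
            if (!mmc_card_mmc(card))        /* illustrative check */
                    return 0;
            return mmc_packed_init(mq, card);
    }

    static void example_teardown(struct mmc_queue *mq)
    {
            /* Safe even if init failed: kfree(NULL) is a no-op and
             * mmc_packed_clean() NULLs the pointers it frees. */
            mmc_packed_clean(mq);
    }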
@@ -406,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
         }
 }
 
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+                                            struct mmc_packed *packed,
+                                            struct scatterlist *sg,
+                                            enum mmc_packed_type cmd_type)
+{
+        struct scatterlist *__sg = sg;
+        unsigned int sg_len = 0;
+        struct request *req;
+
+        if (mmc_packed_wr(cmd_type)) {
+                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+                unsigned int len, remain, offset = 0;
+                u8 *buf = (u8 *)packed->cmd_hdr;
+
+                remain = hdr_sz;
+                do {
+                        len = min(remain, max_seg_sz);
+                        sg_set_buf(__sg, buf + offset, len);
+                        offset += len;
+                        remain -= len;
+                        (__sg++)->page_link &= ~0x02;
+                        sg_len++;
+                } while (remain);
+        }
+
+        list_for_each_entry(req, &packed->list, queuelist) {
+                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+                __sg = sg + (sg_len - 1);
+                (__sg++)->page_link &= ~0x02;
+        }
+        sg_mark_end(sg + (sg_len - 1));
+        return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
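Two details in mmc_queue_packed_map_sg() are worth noting. First, blk_rq_map_sg() terminates the list it builds by setting bit 0x02 in the last entry's page_link; the (__sg++)->page_link &= ~0x02 lines clear that bit so the header segments and each request's segments can be concatenated into one long list, and sg_mark_end() re-terminates only the final entry. Second, the header buffer is split into chunks of at most max_seg_sz. A standalone sketch of that splitting loop, assuming the caller provides a table with enough entries:

    /* Sketch: map a contiguous buffer into sg entries no larger
     * than max_seg_sz, mirroring the header loop above. Returns
     * the number of entries consumed. */
    static unsigned int example_map_buf(struct scatterlist *sg, u8 *buf,
                                        unsigned int remain,
                                        unsigned int max_seg_sz)
    {
            unsigned int n = 0, len, offset = 0;

            do {
                    len = min(remain, max_seg_sz);
                    sg_set_buf(&sg[n], buf + offset, len);
                    offset += len;
                    remain -= len;
                    n++;
            } while (remain);

            sg_mark_end(&sg[n - 1]);        /* terminate the final entry */
            return n;
    }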
@@ -414,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
         unsigned int sg_len;
         size_t buflen;
         struct scatterlist *sg;
+        enum mmc_packed_type cmd_type;
         int i;
 
-        if (!mqrq->bounce_buf)
-                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+        cmd_type = mqrq->cmd_type;
+
+        if (!mqrq->bounce_buf) {
+                if (mmc_packed_cmd(cmd_type))
+                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
+                                                       mqrq->sg, cmd_type);
+                else
+                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+        }
 
         BUG_ON(!mqrq->bounce_sg);
 
-        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+        if (mmc_packed_cmd(cmd_type))
+                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+                                                 mqrq->bounce_sg, cmd_type);
+        else
+                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
         mqrq->bounce_sg_len = sg_len;
 
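For a packed request the value returned by mmc_queue_map_sg() now includes the header segments as well as every grouped request's data segments, so callers must size DMA descriptors from the returned length rather than from a single request's segment count. A hypothetical caller (mmc_blk_rw_rq_prep() in the companion block.c change plays this role in the real patch):

    /* Sketch: hand the mapped list to the host controller. */
    static void example_prep_data(struct mmc_queue *mq,
                                  struct mmc_queue_req *mqrq,
                                  struct mmc_data *data)
    {
            data->sg = mqrq->sg;
            data->sg_len = mmc_queue_map_sg(mq, mqrq);
    }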