author     Andrei Warkentin <andreiw@motorola.com>   2011-03-31 19:40:00 -0400
committer  Chris Ball <cjb@laptop.org>               2011-05-24 20:59:38 -0400
commit     f4c5522b0a8827f39f83f928961d87e081bfe71c
tree       3c2126f0adb2a3444b2fd152cc1a880c192c9d77  /drivers/mmc/card
parent     766a6bf6e987ff5f5085c614b5a62a55006b6a7e
mmc: Reliable write support.
Allows reliable writes to be used for MMC writes. Reliable writes are used
to service write requests that have REQ_FUA or REQ_META set. Handles both
the legacy and the enhanced reliable write support in MMC cards.
Signed-off-by: Andrei Warkentin <andreiw@motorola.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
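
To make the policy concrete, here is a standalone sketch of the predicate this
patch introduces (the struct and function names below are invented for
illustration; only the flag and field names come from the patch itself):

/* Standalone illustration of the patch's do_rel_wr predicate and
 * REL_WRITES_SUPPORTED() macro; struct card_caps and
 * use_reliable_write() are made up for this example. */
#include <stdbool.h>
#include <stdio.h>

struct card_caps {
	bool is_mmc;          /* mmc_card_mmc(card) */
	bool enh_rel_wr;      /* ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN */
	unsigned rel_sectors; /* ext_csd.rel_sectors (legacy window size) */
};

static bool use_reliable_write(bool fua, bool meta, bool is_write,
			       const struct card_caps *c)
{
	/* The card qualifies with either the enhanced or the legacy form. */
	bool supported = c->is_mmc && (c->enh_rel_wr || c->rel_sectors > 0);

	return (fua || meta) && is_write && supported;
}

int main(void)
{
	struct card_caps legacy = { .is_mmc = true, .rel_sectors = 8 };

	/* An FUA write to a legacy-mode card becomes a reliable write. */
	printf("%d\n", use_reliable_write(true, false, true, &legacy));  /* 1 */
	/* Reads are never reformatted, whatever the flags say. */
	printf("%d\n", use_reliable_write(true, false, false, &legacy)); /* 0 */
	return 0;
}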
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c | 81
1 file changed, 77 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..91a676773608 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -48,6 +48,10 @@ MODULE_ALIAS("mmc:block");
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
 
+#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
+	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
+	((card)->ext_csd.rel_sectors)))
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -331,6 +335,57 @@ out:
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				   struct mmc_card *card,
+				   struct request *req)
+{
+	int err;
+	struct mmc_command set_count;
+
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+
+	memset(&set_count, 0, sizeof(struct mmc_command));
+	set_count.opcode = MMC_SET_BLOCK_COUNT;
+	set_count.arg = brq->data.blocks | (1 << 31);
+	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &set_count, 0);
+	if (err)
+		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
+		       req->rq_disk->disk_name, err);
+	return err;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -338,6 +393,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			 (rq_data_dir(req) == WRITE) &&
+			 REL_WRITES_SUPPORTED(card);
+
 	mmc_claim_host(card->host);
 
 	do {
@@ -374,12 +438,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
+			 * token, not a STOP_TRANSMISSION request. Reliable
+			 * writes use SET_BLOCK_COUNT and do not use a
+			 * STOP_TRANSMISSION request either.
 			 */
-			if (!mmc_host_is_spi(card->host)
-				|| rq_data_dir(req) == READ)
+			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +462,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
+			goto cmd_err;
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
@@ -565,6 +634,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			return mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			return mmc_blk_issue_discard_rq(mq, req);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		return mmc_blk_issue_flush(mq, req);
 	} else {
 		return mmc_blk_issue_rw_rq(mq, req);
 	}
@@ -622,6 +693,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = &card->dev;
 	set_disk_ro(md->disk, md->read_only);
+	if (REL_WRITES_SUPPORTED(card))
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
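
As a postscript, a worked, standalone sketch of the two mechanics inside
mmc_apply_rel_rw() above: the legacy-mode clamping of the transfer size, and
the CMD23 (SET_BLOCK_COUNT) argument whose bit 31 requests a reliable write.
The helper names are invented for the example; the arithmetic mirrors the
hunk above.

#include <stdint.h>
#include <stdio.h>

/* Legacy-mode clamping from mmc_apply_rel_rw(): a transfer is either
 * exactly one rel_sectors-aligned window, or single sectors.
 * Assumes rel_sectors > 0, as legacy mode implies. */
static unsigned legacy_rel_blocks(unsigned start, unsigned blocks,
				  unsigned rel_sectors)
{
	if (start % rel_sectors)        /* unaligned start: one sector */
		return 1;
	if (blocks > rel_sectors)       /* clamp to one full window */
		return rel_sectors;
	if (blocks < rel_sectors)       /* partial window: one sector */
		return 1;
	return blocks;                  /* exactly one aligned window */
}

/* SET_BLOCK_COUNT argument: block count in the low bits, bit 31 set
 * to request a reliable write. */
static uint32_t cmd23_arg(uint16_t blocks)
{
	return (uint32_t)blocks | (1u << 31);
}

int main(void)
{
	/* With an 8-sector window: a 32-block write starting at sector 16
	 * is cut down to 8 blocks; the request then finishes through
	 * partial completions, one reliable write per window. */
	printf("%u\n", legacy_rel_blocks(16, 32, 8));       /* 8 */
	printf("%u\n", legacy_rel_blocks(3, 8, 8));         /* 1: unaligned */
	printf("0x%08x\n", (unsigned)cmd23_arg(8));         /* 0x80000008 */
	return 0;
}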