author		Andrei Warkentin <andreiw@motorola.com>	2011-03-31 19:40:00 -0400
committer	Chris Ball <cjb@laptop.org>	2011-05-24 20:59:38 -0400
commit		f4c5522b0a8827f39f83f928961d87e081bfe71c
tree		3c2126f0adb2a3444b2fd152cc1a880c192c9d77
parent		766a6bf6e987ff5f5085c614b5a62a55006b6a7e
mmc: Reliable write support.
Allows reliable writes to be used for MMC writes. Reliable writes are
used to service write REQ_FUA/REQ_META requests. Handles both the
legacy and the enhanced reliable write support in MMC cards.

Signed-off-by: Andrei Warkentin <andreiw@motorola.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
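To illustrate the legacy clamping rules, here is a stand-alone
user-space sketch (not part of the patch; clamp_legacy_rel_wr() and the
sample values are invented for the example, while the rules mirror the
mmc_apply_rel_rw() hunk below): a legacy reliable write must start on a
rel_sectors boundary and span at most rel_sectors blocks, and anything
else degrades to single-block transfers.

	#include <stdio.h>

	/* Same shape as the kernel's IS_ALIGNED() for power-of-two alignments. */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	static unsigned int clamp_legacy_rel_wr(unsigned int start,
						unsigned int blocks,
						unsigned int rel_sectors)
	{
		/* Unaligned starts degrade to a single block. */
		if (!IS_ALIGNED(start, rel_sectors))
			return 1;
		/* Oversized transfers are cut down to one reliable-write unit. */
		if (blocks > rel_sectors)
			return rel_sectors;
		/* Undersized transfers also degrade to a single block. */
		if (blocks < rel_sectors)
			return 1;
		return blocks;
	}

	int main(void)
	{
		/* Suppose EXT_CSD_REL_WR_SEC_C reports 8 sectors. */
		printf("%u\n", clamp_legacy_rel_wr(16, 13, 8)); /* aligned, too long  -> 8 */
		printf("%u\n", clamp_legacy_rel_wr(16,  3, 8)); /* aligned, too short -> 1 */
		printf("%u\n", clamp_legacy_rel_wr(17,  8, 8)); /* unaligned          -> 1 */
		return 0;
	}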
-rw-r--r--	drivers/mmc/card/block.c	81
-rw-r--r--	drivers/mmc/core/mmc.c		5
-rw-r--r--	include/linux/mmc/card.h	2
-rw-r--r--	include/linux/mmc/mmc.h		4
4 files changed, 88 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..91a676773608 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -48,6 +48,10 @@ MODULE_ALIAS("mmc:block");
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
 
+#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
+	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || \
+	((card)->ext_csd.rel_sectors)))
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -331,6 +335,57 @@ out:
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				   struct mmc_card *card,
+				   struct request *req)
+{
+	int err;
+	struct mmc_command set_count;
+
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+
+	memset(&set_count, 0, sizeof(struct mmc_command));
+	set_count.opcode = MMC_SET_BLOCK_COUNT;
+	set_count.arg = brq->data.blocks | (1 << 31);
+	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &set_count, 0);
+	if (err)
+		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
+		       req->rq_disk->disk_name, err);
+	return err;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -338,6 +393,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			 (rq_data_dir(req) == WRITE) &&
+			 REL_WRITES_SUPPORTED(card);
+
 	mmc_claim_host(card->host);
 
 	do {
@@ -374,12 +438,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
+			 * token, not a STOP_TRANSMISSION request. Reliable
+			 * writes use SET_BLOCK_COUNT and do not use a
+			 * STOP_TRANSMISSION request either.
 			 */
-			if (!mmc_host_is_spi(card->host)
-			    || rq_data_dir(req) == READ)
+			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +462,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
+			goto cmd_err;
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
@@ -565,6 +634,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			return mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			return mmc_blk_issue_discard_rq(mq, req);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		return mmc_blk_issue_flush(mq, req);
 	} else {
 		return mmc_blk_issue_rw_rq(mq, req);
 	}
@@ -622,6 +693,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = &card->dev;
 	set_disk_ro(md->disk, md->read_only);
+	if (REL_WRITES_SUPPORTED(card))
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5c611a6e0080..ae6b8fd38800 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -300,6 +300,8 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
 		card->ext_csd.hc_erase_size =
 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
+
+		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
 	}
 
 	if (card->ext_csd.rev >= 4) {
@@ -351,6 +353,9 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_TRIM_MULT];
 	}
 
+	if (card->ext_csd.rev >= 5)
+		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+
 	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
 		card->erased_byte = 0xFF;
 	else
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 557b73263390..c4e96fa5fb2b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -45,6 +45,8 @@ struct mmc_ext_csd {
 	u8			rev;
 	u8			erase_group_def;
 	u8			sec_feature_support;
+	u8			rel_sectors;
+	u8			rel_param;
 	u8			bootconfig;
 	unsigned int		sa_timeout;		/* Units: 100ns */
 	unsigned int		hs_max_dtr;
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index b5ec88fd1352..390aa6eef676 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -255,6 +255,7 @@ struct _mmc_csd {
 
 #define EXT_CSD_PARTITION_ATTRIBUTE	156	/* R/W */
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
+#define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
 #define EXT_CSD_BOOT_CONFIG		179	/* R/W */
 #define EXT_CSD_ERASED_MEM_CONT		181	/* RO */
@@ -265,6 +266,7 @@ struct _mmc_csd {
 #define EXT_CSD_CARD_TYPE		196	/* RO */
 #define EXT_CSD_SEC_CNT			212	/* RO, 4 bytes */
 #define EXT_CSD_S_A_TIMEOUT		217	/* RO */
+#define EXT_CSD_REL_WR_SEC_C		222	/* RO */
 #define EXT_CSD_HC_WP_GRP_SIZE		221	/* RO */
 #define EXT_CSD_ERASE_TIMEOUT_MULT	223	/* RO */
 #define EXT_CSD_HC_ERASE_GRP_SIZE	224	/* RO */
@@ -277,6 +279,8 @@ struct _mmc_csd {
  * EXT_CSD field definitions
  */
 
+#define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+
 #define EXT_CSD_CMD_SET_NORMAL		(1<<0)
 #define EXT_CSD_CMD_SET_SECURE		(1<<1)
 #define EXT_CSD_CMD_SET_CPSECURE	(1<<2)
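A quick way to exercise the new flush path from user space (a hedged
sketch: the device node is an assumption, and the write is destructive
to the first sector, so point it at a scratch device). fsync() on the
block device reaches the driver as a REQ_FLUSH request, now answered by
mmc_blk_issue_flush(); REQ_FUA/REQ_META writes normally originate from
a journaling filesystem rather than from user space:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[512];
		/* Assumed device node; adjust for your system. */
		int fd = open("/dev/mmcblk0", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(buf, 0xa5, sizeof(buf));
		if (write(fd, buf, sizeof(buf)) != sizeof(buf))
			perror("write");
		if (fsync(fd))		/* arrives at the driver as REQ_FLUSH */
			perror("fsync");
		close(fd);
		return 0;
	}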