author		Kiyoshi Ueda <k-ueda@ct.jp.nec.com>	2007-12-11 17:48:29 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-01-28 04:36:52 -0500
commit		fd539832c7d3a242269374dbcae2cd54da150930 (patch)
tree		78366d0180f3443c591c3dc611b0beeb5657dff1 /drivers/mmc/card/block.c
parent		1381b7e82a52c4ae5ebb2ac0951075debb878a2a (diff)
blk_end_request: changing mmc (take 4)
This patch converts mmc to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Pierre Ossman <drzeus-mmc@drzeus.cx>
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
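For readers unfamiliar with the older block-layer completion interface, the sketch below contrasts the two idioms this conversion moves between, assuming a 2.6.24-era tree. The wrapper functions old_style_complete() and new_style_complete() are hypothetical illustrations, not code from the patch; only the completion calls inside them are taken from the diff.

#include <linux/blkdev.h>	/* request-completion helpers on a 2.6.24-era tree */

/*
 * Old interface: complete 'bytes' of the request, then finish it by hand
 * once nothing is left.  The second argument is 'uptodate': 1 = success,
 * 0 = failure.
 */
static void old_style_complete(struct request *req, unsigned int bytes)
{
	int ret = end_that_request_chunk(req, 1, bytes);

	if (!ret) {
		/* The whole request completed successfully. */
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);
		end_that_request_last(req, 1);
	}
}

/*
 * New interface: a single call completes 'bytes' and, once the request is
 * fully done, dequeues it and signals completion.  The second argument is
 * now an error code: 0 for success, a negative errno such as -EIO for
 * failure.  Returns nonzero while part of the request is still pending.
 */
static int new_style_complete(struct request *req, unsigned int bytes)
{
	return __blk_end_request(req, 0, bytes);
}

Like the old helpers, __blk_end_request() expects the caller to hold the queue lock (blk_end_request() is the variant that takes the lock itself), which is why the spin_lock_irq(&md->lock)/spin_unlock_irq(&md->lock) pairs around the calls are untouched by the conversion; in this driver, md->lock is the spinlock used for the request queue.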
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--	drivers/mmc/card/block.c	24
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index aeb32a93f6a0..91ded3e82401 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * A block was successfully transferred.
 		 */
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
-		if (!ret) {
-			/*
-			 * The whole request completed successfully.
-			 */
-			add_disk_randomness(req->rq_disk);
-			blkdev_dequeue_request(req);
-			end_that_request_last(req, 1);
-		}
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			else
 				bytes = blocks << 9;
 			spin_lock_irq(&md->lock);
-			ret = end_that_request_chunk(req, 1, bytes);
+			ret = __blk_end_request(req, 0, bytes);
 			spin_unlock_irq(&md->lock);
 		}
 	} else if (rq_data_dir(req) != READ &&
 		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
 
 	spin_lock_irq(&md->lock);
-	while (ret) {
-		ret = end_that_request_chunk(req, 0,
-				req->current_nr_sectors << 9);
-	}
-
-	add_disk_randomness(req->rq_disk);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, 0);
+	while (ret)
+		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
 	return 0;