aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/card/block.c
diff options
context:
space:
mode:
authorPierre Ossman <drzeus@drzeus.cx>2008-07-05 19:10:27 -0400
committerPierre Ossman <drzeus@drzeus.cx>2008-07-15 08:14:49 -0400
commit23af60398af2f5033e2f53665538a09f498dbc03 (patch)
treefc244139d7d6bb1a82b80cc60fae86970e60c8c5 /drivers/mmc/card/block.c
parent97067d5581ec831a75a45a52e417bee0f7943dbf (diff)
mmc: remove multiwrite capability
Relax requirements on host controllers and only require that they do not report a transfer count that is larger than the actual one (i.e. a lower value is okay). This is how many other parts of the kernel behave, so upper layers should already be prepared to handle that scenario. This gives us a performance boost on MMC cards. Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--drivers/mmc/card/block.c47
1 files changed, 19 insertions, 28 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4b0f8220f153..66e5a5487c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 if (brq.data.blocks > card->host->max_blk_count) 237 if (brq.data.blocks > card->host->max_blk_count)
238 brq.data.blocks = card->host->max_blk_count; 238 brq.data.blocks = card->host->max_blk_count;
239 239
240 /*
241 * If the host doesn't support multiple block writes, force
242 * block writes to single block. SD cards are excepted from
243 * this rule as they support querying the number of
244 * successfully written sectors.
245 */
246 if (rq_data_dir(req) != READ &&
247 !(card->host->caps & MMC_CAP_MULTIWRITE) &&
248 !mmc_card_sd(card))
249 brq.data.blocks = 1;
250
251 if (brq.data.blocks > 1) { 240 if (brq.data.blocks > 1) {
252 /* SPI multiblock writes terminate using a special 241 /* SPI multiblock writes terminate using a special
253 * token, not a STOP_TRANSMISSION request. 242 * token, not a STOP_TRANSMISSION request.
@@ -367,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
367 * mark the known good sectors as ok. 356 * mark the known good sectors as ok.
368 * 357 *
369 * If the card is not SD, we can still ok written sectors 358 * If the card is not SD, we can still ok written sectors
370 * if the controller can do proper error reporting. 359 * as reported by the controller (which might be less than
360 * the real number of written sectors, but never more).
371 * 361 *
372 * For reads we just fail the entire chunk as that should 362 * For reads we just fail the entire chunk as that should
373 * be safe in all cases. 363 * be safe in all cases.
374 */ 364 */
375 if (rq_data_dir(req) != READ && mmc_card_sd(card)) { 365 if (rq_data_dir(req) != READ) {
376 u32 blocks; 366 if (mmc_card_sd(card)) {
377 unsigned int bytes; 367 u32 blocks;
378 368 unsigned int bytes;
379 blocks = mmc_sd_num_wr_blocks(card); 369
380 if (blocks != (u32)-1) { 370 blocks = mmc_sd_num_wr_blocks(card);
381 if (card->csd.write_partial) 371 if (blocks != (u32)-1) {
382 bytes = blocks << md->block_bits; 372 if (card->csd.write_partial)
383 else 373 bytes = blocks << md->block_bits;
384 bytes = blocks << 9; 374 else
375 bytes = blocks << 9;
376 spin_lock_irq(&md->lock);
377 ret = __blk_end_request(req, 0, bytes);
378 spin_unlock_irq(&md->lock);
379 }
380 } else {
385 spin_lock_irq(&md->lock); 381 spin_lock_irq(&md->lock);
386 ret = __blk_end_request(req, 0, bytes); 382 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
387 spin_unlock_irq(&md->lock); 383 spin_unlock_irq(&md->lock);
388 } 384 }
389 } else if (rq_data_dir(req) != READ &&
390 (card->host->caps & MMC_CAP_MULTIWRITE)) {
391 spin_lock_irq(&md->lock);
392 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
393 spin_unlock_irq(&md->lock);
394 } 385 }
395 386
396 mmc_release_host(card->host); 387 mmc_release_host(card->host);