author		Adrian Hunter <ext-adrian.hunter@nokia.com>	2008-12-31 12:21:17 -0500
committer	Pierre Ossman <drzeus@drzeus.cx>	2008-12-31 12:21:17 -0500
commit		6a79e391df295bd7c2aa1309ea5031f361c197fd (patch)
tree		81789b2131bfd64c6478cd979bb35b4d6df94186 /drivers/mmc/card/block.c
parent		a0d045cac9bcb3e9a9796d596415f7ffb64852e2 (diff)
mmc_block: ensure all sectors that do not have errors are read
If a card encounters an ECC error while reading a sector, it will
time out. Instead of reporting the entire I/O request as failed,
redo the I/O one sector at a time so that all readable sectors are
provided to the upper layers.
Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
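
The recovery pattern is worth seeing in isolation. Below is a minimal
userspace sketch of the same control flow, not driver code: read_blocks(),
BAD_SECTOR and the request size are stand-ins invented for illustration.

/*
 * Simulation of the recovery strategy: try a multi-sector read; on
 * failure, retry one sector at a time so that only the truly
 * unreadable sectors are reported as errors.
 */
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512
#define BAD_SECTOR  3   /* pretend this sector has an ECC error */

/* Stand-in for the host transfer: fails if the range covers BAD_SECTOR. */
static int read_blocks(unsigned start, unsigned count, char *buf)
{
        if (start <= BAD_SECTOR && BAD_SECTOR < start + count)
                return -1;
        memset(buf, 'D', (size_t)count * SECTOR_SIZE);
        return 0;
}

int main(void)
{
        char buf[8 * SECTOR_SIZE];
        unsigned start = 0, remaining = 8;
        int disable_multi = 0;

        while (remaining) {
                unsigned count = disable_multi ? 1 : remaining;

                if (read_blocks(start, count, buf)) {
                        if (count > 1) {
                                /* Redo the read one sector at a time. */
                                disable_multi = 1;
                                continue;
                        }
                        printf("sector %u: I/O error\n", start);
                } else {
                        printf("sectors %u-%u: ok\n", start,
                               start + count - 1);
                }
                start += count;
                remaining -= count;
        }
        return 0;
}

As in the patch, the multi-sector attempt fails as a whole; the retry path
then completes every readable sector and reports an error only for the
sector that actually fails.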
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--	drivers/mmc/card/block.c	76
1 file changed, 59 insertions(+), 17 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cc9b3abf4a3f..45b1f430685f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -229,7 +229,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         struct mmc_blk_data *md = mq->data;
         struct mmc_card *card = md->queue.card;
         struct mmc_blk_request brq;
-        int ret = 1;
+        int ret = 1, disable_multi = 0;
 
         mmc_claim_host(card->host);
 
@@ -251,6 +251,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                 brq.data.blocks = req->nr_sectors;
 
+                /*
+                 * After a read error, we redo the request one sector at a time
+                 * in order to accurately determine which sectors can be read
+                 * successfully.
+                 */
+                if (disable_multi && brq.data.blocks > 1)
+                        brq.data.blocks = 1;
+
                 if (brq.data.blocks > 1) {
                         /* SPI multiblock writes terminate using a special
                          * token, not a STOP_TRANSMISSION request.
@@ -279,6 +287,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 brq.data.sg = mq->sg;
                 brq.data.sg_len = mmc_queue_map_sg(mq);
 
+                /*
+                 * Adjust the sg list so it is the same size as the
+                 * request.
+                 */
+                if (brq.data.blocks != req->nr_sectors) {
+                        int i, data_size = brq.data.blocks << 9;
+                        struct scatterlist *sg;
+
+                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+                                data_size -= sg->length;
+                                if (data_size <= 0) {
+                                        sg->length += data_size;
+                                        i++;
+                                        break;
+                                }
+                        }
+                        brq.data.sg_len = i;
+                }
+
                 mmc_queue_bounce_pre(mq);
 
                 mmc_wait_for_req(card->host, &brq.mrq);
@@ -290,8 +317,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                  * until later as we need to wait for the card to leave
                  * programming mode even when things go wrong.
                  */
-                if (brq.cmd.error || brq.data.error || brq.stop.error)
+                if (brq.cmd.error || brq.data.error || brq.stop.error) {
+                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
+                                /* Redo read one sector at a time */
+                                printk(KERN_WARNING "%s: retrying using single "
+                                       "block read\n", req->rq_disk->disk_name);
+                                disable_multi = 1;
+                                continue;
+                        }
                         status = get_card_status(card, req);
+                }
 
                 if (brq.cmd.error) {
                         printk(KERN_ERR "%s: error %d sending read/write "
@@ -348,8 +383,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
                 }
 
-                if (brq.cmd.error || brq.data.error || brq.stop.error)
+                if (brq.cmd.error || brq.stop.error || brq.data.error) {
+                        if (rq_data_dir(req) == READ) {
+                                /*
+                                 * After an error, we redo I/O one sector at a
+                                 * time, so we only reach here after trying to
+                                 * read a single sector.
+                                 */
+                                spin_lock_irq(&md->lock);
+                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
+                                spin_unlock_irq(&md->lock);
+                                continue;
+                        }
                         goto cmd_err;
+                }
 
                 /*
                  * A block was successfully transferred.
@@ -371,25 +418,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
          * If the card is not SD, we can still ok written sectors
          * as reported by the controller (which might be less than
          * the real number of written sectors, but never more).
-         *
-         * For reads we just fail the entire chunk as that should
-         * be safe in all cases.
          */
-        if (rq_data_dir(req) != READ) {
-                if (mmc_card_sd(card)) {
-                        u32 blocks;
+        if (mmc_card_sd(card)) {
+                u32 blocks;
 
-                        blocks = mmc_sd_num_wr_blocks(card);
-                        if (blocks != (u32)-1) {
-                                spin_lock_irq(&md->lock);
-                                ret = __blk_end_request(req, 0, blocks << 9);
-                                spin_unlock_irq(&md->lock);
-                        }
-                } else {
+                blocks = mmc_sd_num_wr_blocks(card);
+                if (blocks != (u32)-1) {
                         spin_lock_irq(&md->lock);
-                        ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+                        ret = __blk_end_request(req, 0, blocks << 9);
                         spin_unlock_irq(&md->lock);
                 }
+        } else {
+                spin_lock_irq(&md->lock);
+                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+                spin_unlock_irq(&md->lock);
         }
 
         mmc_release_host(card->host);
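
A note on the scatterlist adjustment in the third hunk: once the transfer
is clamped to a single sector, the sg list that was mapped for the whole
request has to be shrunk to match, by trimming the last needed segment and
dropping the rest. Here is a standalone sketch of the same arithmetic,
with a plain struct standing in for the kernel's struct scatterlist;
trim_sg and seg are hypothetical names used only for illustration.

#include <stdio.h>

struct seg {
        unsigned int length;   /* bytes covered by this segment */
};

/* Returns the new number of segments; trims the last one in place. */
static unsigned int trim_sg(struct seg *sg, unsigned int sg_len,
                            int data_size)
{
        unsigned int i;

        for (i = 0; i < sg_len; i++) {
                data_size -= sg[i].length;
                if (data_size <= 0) {
                        /* data_size is now <= 0: shrink this segment
                         * by the overshoot and stop here. */
                        sg[i].length += data_size;
                        return i + 1;
                }
        }
        return sg_len;
}

int main(void)
{
        struct seg sg[] = { { 4096 }, { 4096 }, { 4096 } };
        /* One 512-byte sector instead of the original 12 KiB. */
        unsigned int n = trim_sg(sg, 3, 512);

        printf("segments: %u, first length: %u\n", n, sg[0].length);
        /* prints: segments: 1, first length: 512 */
        return 0;
}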