Diffstat (limited to 'drivers/mmc/card/block.c')
 drivers/mmc/card/block.c | 122 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 96 insertions(+), 26 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3d067c35185d..45b1f430685f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -145,7 +145,7 @@ struct mmc_blk_request {
 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 {
 	int err;
-	u32 blocks;
+	__be32 blocks;
 
 	struct mmc_request mrq;
 	struct mmc_command cmd;
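The type change above is an endianness annotation: the card returns its block count as a big-endian 32-bit word, so declaring the local as __be32 lets sparse verify that the raw value is converted to host order exactly once, by the ntohl() call below. A minimal userspace sketch of the same pattern, with ntohl() from the standard socket headers standing in for the kernel's version:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Pretend the card sent 0x00000200 (512 blocks) in wire order. */
	uint8_t wire[4] = { 0x00, 0x00, 0x02, 0x00 };
	uint32_t be;				/* plays the role of __be32 */

	memcpy(&be, wire, sizeof(be));		/* raw big-endian load */
	printf("blocks = %u\n", ntohl(be));	/* host order: 512 */
	return 0;
}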
@@ -204,9 +204,24 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	if (cmd.error || data.error)
 		return (u32)-1;
 
-	blocks = ntohl(blocks);
+	return ntohl(blocks);
+}
+
+static u32 get_card_status(struct mmc_card *card, struct request *req)
+{
+	struct mmc_command cmd;
+	int err;
 
-	return blocks;
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(card->host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err)
+		printk(KERN_ERR "%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+	return cmd.resp[0];
 }
 
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
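The new get_card_status() helper issues CMD13 (MMC_SEND_STATUS): outside SPI mode the card is addressed by placing its relative card address (RCA) in the upper 16 bits of the argument, and the card answers with its 32-bit R1 status word, which the error messages below print. A hedged userspace sketch of pulling two useful fields out of that word; the bit positions follow the SD/MMC specifications (CURRENT_STATE is bits 12:9, the general ERROR flag is bit 19):

#include <stdint.h>
#include <stdio.h>

#define R1_CURRENT_STATE(x)	(((x) >> 9) & 0xF)	/* 4 = tran, 7 = prg */
#define R1_ERROR		(1u << 19)		/* general error flag */

static void show_status(uint32_t status)
{
	printf("state=%u%s\n", (unsigned)R1_CURRENT_STATE(status),
	       (status & R1_ERROR) ? " (error)" : "");
}

int main(void)
{
	show_status(0x00000900);	/* transfer state, no error */
	show_status(0x00080E00);	/* programming state + error flag */
	return 0;
}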
@@ -214,13 +229,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1;
+	int ret = 1, disable_multi = 0;
 
 	mmc_claim_host(card->host);
 
 	do {
 		struct mmc_command cmd;
-		u32 readcmd, writecmd;
+		u32 readcmd, writecmd, status = 0;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
 		brq.mrq.cmd = &brq.cmd;
@@ -236,6 +251,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors;
 
+		/*
+		 * After a read error, we redo the request one sector at a time
+		 * in order to accurately determine which sectors can be read
+		 * successfully.
+		 */
+		if (disable_multi && brq.data.blocks > 1)
+			brq.data.blocks = 1;
+
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
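The block comment above is the heart of the change: a failed multi-block read is retried one sector at a time, so exactly the unreadable sectors can be identified while everything else still succeeds. A standalone sketch of that fallback policy; read_blocks() is a hypothetical stand-in for the actual transfer, not a kernel API:

#include <stdio.h>

/* Hypothetical transfer callback: pretend block 5 cannot be read. */
static int read_blocks(unsigned start, unsigned n)
{
	return (start <= 5 && 5 < start + n) ? -1 : 0;
}

int main(void)
{
	unsigned start = 0, count = 8, i;

	if (read_blocks(start, count) == 0)
		return 0;		/* fast path: one multi-block read */

	/* disable_multi path: redo the range one block at a time */
	for (i = start; i < start + count; i++)
		if (read_blocks(i, 1))
			printf("block %u unreadable\n", i);
	return 0;
}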
@@ -264,6 +287,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.data.sg = mq->sg;
 		brq.data.sg_len = mmc_queue_map_sg(mq);
 
+		/*
+		 * Adjust the sg list so it is the same size as the
+		 * request.
+		 */
+		if (brq.data.blocks != req->nr_sectors) {
+			int i, data_size = brq.data.blocks << 9;
+			struct scatterlist *sg;
+
+			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+				data_size -= sg->length;
+				if (data_size <= 0) {
+					sg->length += data_size;
+					i++;
+					break;
+				}
+			}
+			brq.data.sg_len = i;
+		}
+
 		mmc_queue_bounce_pre(mq);
 
 		mmc_wait_for_req(card->host, &brq.mrq);
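When the retry path shrinks brq.data.blocks to 1, the scatterlist already mapped for the whole request is too long for the 512-byte transfer, so the loop above walks the entries, subtracts each length from the byte budget, clips the entry that crosses the boundary, and shrinks sg_len. A userspace model of the same trim, with a plain struct standing in for struct scatterlist:

#include <stdio.h>

struct seg { int length; };

static int trim_segs(struct seg *sg, int nents, int data_size)
{
	int i;

	for (i = 0; i < nents; i++) {
		data_size -= sg[i].length;
		if (data_size <= 0) {
			sg[i].length += data_size;	/* clip last segment */
			i++;
			break;
		}
	}
	return i;					/* new entry count */
}

int main(void)
{
	struct seg sg[3] = { { 4096 }, { 4096 }, { 4096 } };
	int n = trim_segs(sg, 3, 512);		/* one 512-byte sector */

	printf("entries=%d first=%d\n", n, sg[0].length);	/* 1, 512 */
	return 0;
}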
@@ -275,19 +317,40 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
+		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
+				/* Redo read one sector at a time */
+				printk(KERN_WARNING "%s: retrying using single "
+				       "block read\n", req->rq_disk->disk_name);
+				disable_multi = 1;
+				continue;
+			}
+			status = get_card_status(card, req);
+		}
+
 		if (brq.cmd.error) {
-			printk(KERN_ERR "%s: error %d sending read/write command\n",
-			       req->rq_disk->disk_name, brq.cmd.error);
+			printk(KERN_ERR "%s: error %d sending read/write "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.cmd.error,
+			       brq.cmd.resp[0], status);
 		}
 
 		if (brq.data.error) {
-			printk(KERN_ERR "%s: error %d transferring data\n",
-			       req->rq_disk->disk_name, brq.data.error);
+			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
+				/* 'Stop' response contains card status */
+				status = brq.mrq.stop->resp[0];
+			printk(KERN_ERR "%s: error %d transferring data,"
+			       " sector %u, nr %u, card status %#x\n",
+			       req->rq_disk->disk_name, brq.data.error,
+			       (unsigned)req->sector,
+			       (unsigned)req->nr_sectors, status);
 		}
 
 		if (brq.stop.error) {
-			printk(KERN_ERR "%s: error %d sending stop command\n",
-			       req->rq_disk->disk_name, brq.stop.error);
+			printk(KERN_ERR "%s: error %d sending stop command, "
+			       "response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.stop.error,
+			       brq.stop.resp[0], status);
 		}
 
 		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
@@ -320,8 +383,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
 		}
 
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		if (brq.cmd.error || brq.stop.error || brq.data.error) {
+			if (rq_data_dir(req) == READ) {
+				/*
+				 * After an error, we redo I/O one sector at a
+				 * time, so we only reach here after trying to
+				 * read a single sector.
+				 */
+				spin_lock_irq(&md->lock);
+				ret = __blk_end_request(req, -EIO, brq.data.blksz);
+				spin_unlock_irq(&md->lock);
+				continue;
+			}
 			goto cmd_err;
+		}
 
 		/*
 		 * A block was successfully transferred.
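__blk_end_request() completes only the bytes it is given and returns nonzero while the request still has sectors outstanding, so ending brq.data.blksz bytes with -EIO fails just the one bad sector and the do/while loop carries on with the remainder. A toy model of that piecewise completion; end_request() is a simplified stand-in, not the block-layer API:

#include <stdio.h>

struct request { unsigned remaining; };		/* bytes left to complete */

/* Simplified stand-in for __blk_end_request(): consume 'bytes' of the
 * request and return nonzero while any bytes remain. */
static int end_request(struct request *rq, int error, unsigned bytes)
{
	if (error)
		fprintf(stderr, "failing %u bytes with error %d\n", bytes, error);
	rq->remaining -= bytes;
	return rq->remaining != 0;
}

int main(void)
{
	struct request rq = { .remaining = 4 * 512 };	/* four sectors */
	unsigned sector = 0;
	int ret = 1;

	while (ret) {		/* mirrors the driver's do/while (ret) */
		int error = (sector == 2) ? -5 : 0;	/* sector 2 is bad (-EIO) */

		ret = end_request(&rq, error, 512);
		sector++;
	}
	return 0;
}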
@@ -343,25 +418,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	 * If the card is not SD, we can still ok written sectors
 	 * as reported by the controller (which might be less than
 	 * the real number of written sectors, but never more).
-	 *
-	 * For reads we just fail the entire chunk as that should
-	 * be safe in all cases.
 	 */
-	if (rq_data_dir(req) != READ) {
-		if (mmc_card_sd(card)) {
-			u32 blocks;
+	if (mmc_card_sd(card)) {
+		u32 blocks;
 
-			blocks = mmc_sd_num_wr_blocks(card);
-			if (blocks != (u32)-1) {
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, 0, blocks << 9);
-				spin_unlock_irq(&md->lock);
-			}
-		} else {
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
 			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+			ret = __blk_end_request(req, 0, blocks << 9);
 			spin_unlock_irq(&md->lock);
 		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
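On this error path, write accounting prefers the card's own answer: an SD card reports how many blocks it actually programmed via ACMD22 (SEND_NUM_WR_BLOCKS, wrapped by mmc_sd_num_wr_blocks() above), while other cards fall back to the controller's bytes_xfered, which may under-count but never over-counts. A hedged sketch of that rule; query_card_blocks() is a hypothetical stand-in for the ACMD22 query:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for mmc_sd_num_wr_blocks(); (uint32_t)-1 means
 * the query itself failed. */
static uint32_t query_card_blocks(void)
{
	return 96;			/* pretend the card wrote 96 blocks */
}

/* How many bytes may safely be acknowledged after a write error. */
static uint32_t bytes_to_ack(int is_sd, uint32_t bytes_xfered)
{
	if (is_sd) {
		uint32_t blocks = query_card_blocks();

		if (blocks == (uint32_t)-1)
			return 0;	/* query failed: acknowledge nothing */
		return blocks << 9;	/* 512-byte sectors to bytes */
	}
	return bytes_xfered;		/* controller count, never too high */
}

int main(void)
{
	printf("SD:     %u bytes\n", bytes_to_ack(1, 0));	/* 49152 */
	printf("non-SD: %u bytes\n", bytes_to_ack(0, 8192));	/* 8192 */
	return 0;
}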