Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  60
1 file changed, 28 insertions(+), 32 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f9ad960d7c1a..66e5a5487c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2,7 +2,7 @@
  * Block driver for media (i.e., flash cards)
  *
  * Copyright 2002 Hewlett-Packard Company
- * Copyright 2005-2007 Pierre Ossman
+ * Copyright 2005-2008 Pierre Ossman
  *
  * Use consistent with the GNU GPL is permitted,
  * provided that this copyright notice is
@@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 if (brq.data.blocks > card->host->max_blk_count)
                         brq.data.blocks = card->host->max_blk_count;
 
-                /*
-                 * If the host doesn't support multiple block writes, force
-                 * block writes to single block. SD cards are excepted from
-                 * this rule as they support querying the number of
-                 * successfully written sectors.
-                 */
-                if (rq_data_dir(req) != READ &&
-                    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
-                    !mmc_card_sd(card))
-                        brq.data.blocks = 1;
-
                 if (brq.data.blocks > 1) {
                         /* SPI multiblock writes terminate using a special
                          * token, not a STOP_TRANSMISSION request.
@@ -296,22 +285,24 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
                 mmc_queue_bounce_post(mq);
 
+                /*
+                 * Check for errors here, but don't jump to cmd_err
+                 * until later as we need to wait for the card to leave
+                 * programming mode even when things go wrong.
+                 */
                 if (brq.cmd.error) {
                         printk(KERN_ERR "%s: error %d sending read/write command\n",
                                req->rq_disk->disk_name, brq.cmd.error);
-                        goto cmd_err;
                 }
 
                 if (brq.data.error) {
                         printk(KERN_ERR "%s: error %d transferring data\n",
                                req->rq_disk->disk_name, brq.data.error);
-                        goto cmd_err;
                 }
 
                 if (brq.stop.error) {
                         printk(KERN_ERR "%s: error %d sending stop command\n",
                                req->rq_disk->disk_name, brq.stop.error);
-                        goto cmd_err;
                 }
 
                 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
@@ -344,6 +335,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
                 }
 
+                if (brq.cmd.error || brq.data.error || brq.stop.error)
+                        goto cmd_err;
+
                 /*
                  * A block was successfully transferred.
                  */
@@ -362,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
          * mark the known good sectors as ok.
          *
          * If the card is not SD, we can still ok written sectors
-         * if the controller can do proper error reporting.
+         * as reported by the controller (which might be less than
+         * the real number of written sectors, but never more).
          *
          * For reads we just fail the entire chunk as that should
          * be safe in all cases.
          */
-        if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
-                u32 blocks;
-                unsigned int bytes;
-
-                blocks = mmc_sd_num_wr_blocks(card);
-                if (blocks != (u32)-1) {
-                        if (card->csd.write_partial)
-                                bytes = blocks << md->block_bits;
-                        else
-                                bytes = blocks << 9;
+        if (rq_data_dir(req) != READ) {
+                if (mmc_card_sd(card)) {
+                        u32 blocks;
+                        unsigned int bytes;
+
+                        blocks = mmc_sd_num_wr_blocks(card);
+                        if (blocks != (u32)-1) {
+                                if (card->csd.write_partial)
+                                        bytes = blocks << md->block_bits;
+                                else
+                                        bytes = blocks << 9;
+                                spin_lock_irq(&md->lock);
+                                ret = __blk_end_request(req, 0, bytes);
+                                spin_unlock_irq(&md->lock);
+                        }
+                } else {
                         spin_lock_irq(&md->lock);
-                        ret = __blk_end_request(req, 0, bytes);
+                        ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                         spin_unlock_irq(&md->lock);
                 }
-        } else if (rq_data_dir(req) != READ &&
-                   (card->host->caps & MMC_CAP_MULTIWRITE)) {
-                spin_lock_irq(&md->lock);
-                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
-                spin_unlock_irq(&md->lock);
         }
 
         mmc_release_host(card->host);