-rw-r--r--  drivers/block/amiflop.c          | 10
-rw-r--r--  drivers/block/ataflop.c          | 14
-rw-r--r--  drivers/block/hd.c               | 14
-rw-r--r--  drivers/block/mg_disk.c          | 16
-rw-r--r--  drivers/block/paride/pcd.c       | 12
-rw-r--r--  drivers/block/paride/pd.c        |  5
-rw-r--r--  drivers/block/paride/pf.c        | 28
-rw-r--r--  drivers/block/ps3disk.c          |  6
-rw-r--r--  drivers/block/swim.c             | 14
-rw-r--r--  drivers/block/swim3.c            | 26
-rw-r--r--  drivers/block/xd.c               | 15
-rw-r--r--  drivers/block/xen-blkfront.c     |  2
-rw-r--r--  drivers/block/xsysace.c          |  4
-rw-r--r--  drivers/block/z2ram.c            |  4
-rw-r--r--  drivers/cdrom/gdrom.c            |  6
-rw-r--r--  drivers/message/i2o/i2o_block.c  |  2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c        | 22
-rw-r--r--  drivers/sbus/char/jsflash.c      |  8
-rw-r--r--  include/linux/blkdev.h           | 46
19 files changed, 128 insertions, 126 deletions
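
The conversion below is mechanical across all nineteen files: the old end_request(req, uptodate) interface took a 0/1 "uptodate" flag, while the blk_end_request_cur()/__blk_end_request_cur() helpers added to include/linux/blkdev.h at the bottom of this patch take 0 for success or a negative errno. A minimal sketch of the mapping, using a hypothetical wrapper name that is not part of the patch:

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Illustrative only: how a 0/1 "uptodate" result from an old-style
     * completion path maps onto the new errno-based helper.  Like
     * end_request(), this must be called with the queue lock held. */
    static inline void example_end_cur(struct request *req, int uptodate)
    {
            __blk_end_request_cur(req, uptodate ? 0 : -EIO);
    }
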
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff7068..b99a2a606d02 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1359,7 +1359,7 @@ static void redo_fd_request(void) | |||
1359 | #endif | 1359 | #endif |
1360 | block = CURRENT->sector + cnt; | 1360 | block = CURRENT->sector + cnt; |
1361 | if ((int)block > floppy->blocks) { | 1361 | if ((int)block > floppy->blocks) { |
1362 | end_request(CURRENT, 0); | 1362 | __blk_end_request_cur(CURRENT, -EIO); |
1363 | goto repeat; | 1363 | goto repeat; |
1364 | } | 1364 | } |
1365 | 1365 | ||
@@ -1373,11 +1373,11 @@ static void redo_fd_request(void) | |||
1373 | 1373 | ||
1374 | if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { | 1374 | if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { |
1375 | printk(KERN_WARNING "do_fd_request: unknown command\n"); | 1375 | printk(KERN_WARNING "do_fd_request: unknown command\n"); |
1376 | end_request(CURRENT, 0); | 1376 | __blk_end_request_cur(CURRENT, -EIO); |
1377 | goto repeat; | 1377 | goto repeat; |
1378 | } | 1378 | } |
1379 | if (get_track(drive, track) == -1) { | 1379 | if (get_track(drive, track) == -1) { |
1380 | end_request(CURRENT, 0); | 1380 | __blk_end_request_cur(CURRENT, -EIO); |
1381 | goto repeat; | 1381 | goto repeat; |
1382 | } | 1382 | } |
1383 | 1383 | ||
@@ -1391,7 +1391,7 @@ static void redo_fd_request(void) | |||
1391 | 1391 | ||
1392 | /* keep the drive spinning while writes are scheduled */ | 1392 | /* keep the drive spinning while writes are scheduled */ |
1393 | if (!fd_motor_on(drive)) { | 1393 | if (!fd_motor_on(drive)) { |
1394 | end_request(CURRENT, 0); | 1394 | __blk_end_request_cur(CURRENT, -EIO); |
1395 | goto repeat; | 1395 | goto repeat; |
1396 | } | 1396 | } |
1397 | /* | 1397 | /* |
@@ -1410,7 +1410,7 @@ static void redo_fd_request(void) | |||
1410 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; | 1410 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
1411 | CURRENT->sector += CURRENT->current_nr_sectors; | 1411 | CURRENT->sector += CURRENT->current_nr_sectors; |
1412 | 1412 | ||
1413 | end_request(CURRENT, 1); | 1413 | __blk_end_request_cur(CURRENT, 0); |
1414 | goto repeat; | 1414 | goto repeat; |
1415 | } | 1415 | } |
1416 | 1416 | ||
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4c..44a8702136a9 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -612,7 +612,7 @@ static void fd_error( void ) | |||
612 | CURRENT->errors++; | 612 | CURRENT->errors++; |
613 | if (CURRENT->errors >= MAX_ERRORS) { | 613 | if (CURRENT->errors >= MAX_ERRORS) { |
614 | printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); | 614 | printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); |
615 | end_request(CURRENT, 0); | 615 | __blk_end_request_cur(CURRENT, -EIO); |
616 | } | 616 | } |
617 | else if (CURRENT->errors == RECALIBRATE_ERRORS) { | 617 | else if (CURRENT->errors == RECALIBRATE_ERRORS) { |
618 | printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); | 618 | printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); |
@@ -734,7 +734,7 @@ static void do_fd_action( int drive ) | |||
734 | /* all sectors finished */ | 734 | /* all sectors finished */ |
735 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; | 735 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
736 | CURRENT->sector += CURRENT->current_nr_sectors; | 736 | CURRENT->sector += CURRENT->current_nr_sectors; |
737 | end_request(CURRENT, 1); | 737 | __blk_end_request_cur(CURRENT, 0); |
738 | redo_fd_request(); | 738 | redo_fd_request(); |
739 | return; | 739 | return; |
740 | } | 740 | } |
@@ -1141,7 +1141,7 @@ static void fd_rwsec_done1(int status) | |||
1141 | /* all sectors finished */ | 1141 | /* all sectors finished */ |
1142 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; | 1142 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
1143 | CURRENT->sector += CURRENT->current_nr_sectors; | 1143 | CURRENT->sector += CURRENT->current_nr_sectors; |
1144 | end_request(CURRENT, 1); | 1144 | __blk_end_request_cur(CURRENT, 0); |
1145 | redo_fd_request(); | 1145 | redo_fd_request(); |
1146 | } | 1146 | } |
1147 | return; | 1147 | return; |
@@ -1414,7 +1414,7 @@ repeat: | |||
1414 | if (!UD.connected) { | 1414 | if (!UD.connected) { |
1415 | /* drive not connected */ | 1415 | /* drive not connected */ |
1416 | printk(KERN_ERR "Unknown Device: fd%d\n", drive ); | 1416 | printk(KERN_ERR "Unknown Device: fd%d\n", drive ); |
1417 | end_request(CURRENT, 0); | 1417 | __blk_end_request_cur(CURRENT, -EIO); |
1418 | goto repeat; | 1418 | goto repeat; |
1419 | } | 1419 | } |
1420 | 1420 | ||
@@ -1430,12 +1430,12 @@ repeat: | |||
1430 | /* user supplied disk type */ | 1430 | /* user supplied disk type */ |
1431 | if (--type >= NUM_DISK_MINORS) { | 1431 | if (--type >= NUM_DISK_MINORS) { |
1432 | printk(KERN_WARNING "fd%d: invalid disk format", drive ); | 1432 | printk(KERN_WARNING "fd%d: invalid disk format", drive ); |
1433 | end_request(CURRENT, 0); | 1433 | __blk_end_request_cur(CURRENT, -EIO); |
1434 | goto repeat; | 1434 | goto repeat; |
1435 | } | 1435 | } |
1436 | if (minor2disktype[type].drive_types > DriveType) { | 1436 | if (minor2disktype[type].drive_types > DriveType) { |
1437 | printk(KERN_WARNING "fd%d: unsupported disk format", drive ); | 1437 | printk(KERN_WARNING "fd%d: unsupported disk format", drive ); |
1438 | end_request(CURRENT, 0); | 1438 | __blk_end_request_cur(CURRENT, -EIO); |
1439 | goto repeat; | 1439 | goto repeat; |
1440 | } | 1440 | } |
1441 | type = minor2disktype[type].index; | 1441 | type = minor2disktype[type].index; |
@@ -1445,7 +1445,7 @@ repeat: | |||
1445 | } | 1445 | } |
1446 | 1446 | ||
1447 | if (CURRENT->sector + 1 > UDT->blocks) { | 1447 | if (CURRENT->sector + 1 > UDT->blocks) { |
1448 | end_request(CURRENT, 0); | 1448 | __blk_end_request_cur(CURRENT, -EIO); |
1449 | goto repeat; | 1449 | goto repeat; |
1450 | } | 1450 | } |
1451 | 1451 | ||
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e50..5cb300b81c6a 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -410,7 +410,7 @@ static void bad_rw_intr(void) | |||
410 | if (req != NULL) { | 410 | if (req != NULL) { |
411 | struct hd_i_struct *disk = req->rq_disk->private_data; | 411 | struct hd_i_struct *disk = req->rq_disk->private_data; |
412 | if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { | 412 | if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { |
413 | end_request(req, 0); | 413 | __blk_end_request_cur(req, -EIO); |
414 | disk->special_op = disk->recalibrate = 1; | 414 | disk->special_op = disk->recalibrate = 1; |
415 | } else if (req->errors % RESET_FREQ == 0) | 415 | } else if (req->errors % RESET_FREQ == 0) |
416 | reset = 1; | 416 | reset = 1; |
@@ -466,7 +466,7 @@ ok_to_read: | |||
466 | req->buffer+512); | 466 | req->buffer+512); |
467 | #endif | 467 | #endif |
468 | if (req->current_nr_sectors <= 0) | 468 | if (req->current_nr_sectors <= 0) |
469 | end_request(req, 1); | 469 | __blk_end_request_cur(req, 0); |
470 | if (i > 0) { | 470 | if (i > 0) { |
471 | SET_HANDLER(&read_intr); | 471 | SET_HANDLER(&read_intr); |
472 | return; | 472 | return; |
@@ -505,7 +505,7 @@ ok_to_write: | |||
505 | --req->current_nr_sectors; | 505 | --req->current_nr_sectors; |
506 | req->buffer += 512; | 506 | req->buffer += 512; |
507 | if (!i || (req->bio && req->current_nr_sectors <= 0)) | 507 | if (!i || (req->bio && req->current_nr_sectors <= 0)) |
508 | end_request(req, 1); | 508 | __blk_end_request_cur(req, 0); |
509 | if (i > 0) { | 509 | if (i > 0) { |
510 | SET_HANDLER(&write_intr); | 510 | SET_HANDLER(&write_intr); |
511 | outsw(HD_DATA, req->buffer, 256); | 511 | outsw(HD_DATA, req->buffer, 256); |
@@ -548,7 +548,7 @@ static void hd_times_out(unsigned long dummy) | |||
548 | #ifdef DEBUG | 548 | #ifdef DEBUG |
549 | printk("%s: too many errors\n", name); | 549 | printk("%s: too many errors\n", name); |
550 | #endif | 550 | #endif |
551 | end_request(CURRENT, 0); | 551 | __blk_end_request_cur(CURRENT, -EIO); |
552 | } | 552 | } |
553 | hd_request(); | 553 | hd_request(); |
554 | spin_unlock_irq(hd_queue->queue_lock); | 554 | spin_unlock_irq(hd_queue->queue_lock); |
@@ -563,7 +563,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req) | |||
563 | } | 563 | } |
564 | if (disk->head > 16) { | 564 | if (disk->head > 16) { |
565 | printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); | 565 | printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); |
566 | end_request(req, 0); | 566 | __blk_end_request_cur(req, -EIO); |
567 | } | 567 | } |
568 | disk->special_op = 0; | 568 | disk->special_op = 0; |
569 | return 1; | 569 | return 1; |
@@ -607,7 +607,7 @@ repeat: | |||
607 | ((block+nsect) > get_capacity(req->rq_disk))) { | 607 | ((block+nsect) > get_capacity(req->rq_disk))) { |
608 | printk("%s: bad access: block=%d, count=%d\n", | 608 | printk("%s: bad access: block=%d, count=%d\n", |
609 | req->rq_disk->disk_name, block, nsect); | 609 | req->rq_disk->disk_name, block, nsect); |
610 | end_request(req, 0); | 610 | __blk_end_request_cur(req, -EIO); |
611 | goto repeat; | 611 | goto repeat; |
612 | } | 612 | } |
613 | 613 | ||
@@ -647,7 +647,7 @@ repeat: | |||
647 | break; | 647 | break; |
648 | default: | 648 | default: |
649 | printk("unknown hd-command\n"); | 649 | printk("unknown hd-command\n"); |
650 | end_request(req, 0); | 650 | __blk_end_request_cur(req, -EIO); |
651 | break; | 651 | break; |
652 | } | 652 | } |
653 | } | 653 | } |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a8..408c2bd8a439 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -285,7 +285,7 @@ static void mg_bad_rw_intr(struct mg_host *host) | |||
285 | if (req != NULL) | 285 | if (req != NULL) |
286 | if (++req->errors >= MG_MAX_ERRORS || | 286 | if (++req->errors >= MG_MAX_ERRORS || |
287 | host->error == MG_ERR_TIMEOUT) | 287 | host->error == MG_ERR_TIMEOUT) |
288 | end_request(req, 0); | 288 | __blk_end_request_cur(req, -EIO); |
289 | } | 289 | } |
290 | 290 | ||
291 | static unsigned int mg_out(struct mg_host *host, | 291 | static unsigned int mg_out(struct mg_host *host, |
@@ -351,7 +351,7 @@ static void mg_read(struct request *req) | |||
351 | 351 | ||
352 | if (req->current_nr_sectors <= 0) { | 352 | if (req->current_nr_sectors <= 0) { |
353 | MG_DBG("remain : %d sects\n", remains); | 353 | MG_DBG("remain : %d sects\n", remains); |
354 | end_request(req, 1); | 354 | __blk_end_request_cur(req, 0); |
355 | if (remains > 0) | 355 | if (remains > 0) |
356 | req = elv_next_request(host->breq); | 356 | req = elv_next_request(host->breq); |
357 | } | 357 | } |
@@ -395,7 +395,7 @@ static void mg_write(struct request *req) | |||
395 | 395 | ||
396 | if (req->current_nr_sectors <= 0) { | 396 | if (req->current_nr_sectors <= 0) { |
397 | MG_DBG("remain : %d sects\n", remains); | 397 | MG_DBG("remain : %d sects\n", remains); |
398 | end_request(req, 1); | 398 | __blk_end_request_cur(req, 0); |
399 | if (remains > 0) | 399 | if (remains > 0) |
400 | req = elv_next_request(host->breq); | 400 | req = elv_next_request(host->breq); |
401 | } | 401 | } |
@@ -448,7 +448,7 @@ ok_to_read: | |||
448 | 448 | ||
449 | /* let know if current segment done */ | 449 | /* let know if current segment done */ |
450 | if (req->current_nr_sectors <= 0) | 450 | if (req->current_nr_sectors <= 0) |
451 | end_request(req, 1); | 451 | __blk_end_request_cur(req, 0); |
452 | 452 | ||
453 | /* set handler if read remains */ | 453 | /* set handler if read remains */ |
454 | if (i > 0) { | 454 | if (i > 0) { |
@@ -497,7 +497,7 @@ ok_to_write: | |||
497 | 497 | ||
498 | /* let know if current segment or all done */ | 498 | /* let know if current segment or all done */ |
499 | if (!i || (req->bio && req->current_nr_sectors <= 0)) | 499 | if (!i || (req->bio && req->current_nr_sectors <= 0)) |
500 | end_request(req, 1); | 500 | __blk_end_request_cur(req, 0); |
501 | 501 | ||
502 | /* write 1 sector and set handler if remains */ | 502 | /* write 1 sector and set handler if remains */ |
503 | if (i > 0) { | 503 | if (i > 0) { |
@@ -563,7 +563,7 @@ static void mg_request_poll(struct request_queue *q) | |||
563 | default: | 563 | default: |
564 | printk(KERN_WARNING "%s:%d unknown command\n", | 564 | printk(KERN_WARNING "%s:%d unknown command\n", |
565 | __func__, __LINE__); | 565 | __func__, __LINE__); |
566 | end_request(req, 0); | 566 | __blk_end_request_cur(req, -EIO); |
567 | break; | 567 | break; |
568 | } | 568 | } |
569 | } | 569 | } |
@@ -617,7 +617,7 @@ static unsigned int mg_issue_req(struct request *req, | |||
617 | default: | 617 | default: |
618 | printk(KERN_WARNING "%s:%d unknown command\n", | 618 | printk(KERN_WARNING "%s:%d unknown command\n", |
619 | __func__, __LINE__); | 619 | __func__, __LINE__); |
620 | end_request(req, 0); | 620 | __blk_end_request_cur(req, -EIO); |
621 | break; | 621 | break; |
622 | } | 622 | } |
623 | return MG_ERR_NONE; | 623 | return MG_ERR_NONE; |
@@ -655,7 +655,7 @@ static void mg_request(struct request_queue *q) | |||
655 | "%s: bad access: sector=%d, count=%d\n", | 655 | "%s: bad access: sector=%d, count=%d\n", |
656 | req->rq_disk->disk_name, | 656 | req->rq_disk->disk_name, |
657 | sect_num, sect_cnt); | 657 | sect_num, sect_cnt); |
658 | end_request(req, 0); | 658 | __blk_end_request_cur(req, -EIO); |
659 | continue; | 659 | continue; |
660 | } | 660 | } |
661 | 661 | ||
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014f..9fd57c2aa463 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -735,16 +735,16 @@ static void do_pcd_request(struct request_queue * q) | |||
735 | ps_set_intr(do_pcd_read, NULL, 0, nice); | 735 | ps_set_intr(do_pcd_read, NULL, 0, nice); |
736 | return; | 736 | return; |
737 | } else | 737 | } else |
738 | end_request(pcd_req, 0); | 738 | __blk_end_request_cur(pcd_req, -EIO); |
739 | } | 739 | } |
740 | } | 740 | } |
741 | 741 | ||
742 | static inline void next_request(int success) | 742 | static inline void next_request(int err) |
743 | { | 743 | { |
744 | unsigned long saved_flags; | 744 | unsigned long saved_flags; |
745 | 745 | ||
746 | spin_lock_irqsave(&pcd_lock, saved_flags); | 746 | spin_lock_irqsave(&pcd_lock, saved_flags); |
747 | end_request(pcd_req, success); | 747 | __blk_end_request_cur(pcd_req, err); |
748 | pcd_busy = 0; | 748 | pcd_busy = 0; |
749 | do_pcd_request(pcd_queue); | 749 | do_pcd_request(pcd_queue); |
750 | spin_unlock_irqrestore(&pcd_lock, saved_flags); | 750 | spin_unlock_irqrestore(&pcd_lock, saved_flags); |
@@ -781,7 +781,7 @@ static void pcd_start(void) | |||
781 | 781 | ||
782 | if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { | 782 | if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { |
783 | pcd_bufblk = -1; | 783 | pcd_bufblk = -1; |
784 | next_request(0); | 784 | next_request(-EIO); |
785 | return; | 785 | return; |
786 | } | 786 | } |
787 | 787 | ||
@@ -796,7 +796,7 @@ static void do_pcd_read(void) | |||
796 | pcd_retries = 0; | 796 | pcd_retries = 0; |
797 | pcd_transfer(); | 797 | pcd_transfer(); |
798 | if (!pcd_count) { | 798 | if (!pcd_count) { |
799 | next_request(1); | 799 | next_request(0); |
800 | return; | 800 | return; |
801 | } | 801 | } |
802 | 802 | ||
@@ -815,7 +815,7 @@ static void do_pcd_read_drq(void) | |||
815 | return; | 815 | return; |
816 | } | 816 | } |
817 | pcd_bufblk = -1; | 817 | pcd_bufblk = -1; |
818 | next_request(0); | 818 | next_request(-EIO); |
819 | return; | 819 | return; |
820 | } | 820 | } |
821 | 821 | ||
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af6..0732df4e901a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,7 +410,8 @@ static void run_fsm(void) | |||
410 | pd_claimed = 0; | 410 | pd_claimed = 0; |
411 | phase = NULL; | 411 | phase = NULL; |
412 | spin_lock_irqsave(&pd_lock, saved_flags); | 412 | spin_lock_irqsave(&pd_lock, saved_flags); |
413 | end_request(pd_req, res); | 413 | __blk_end_request_cur(pd_req, |
414 | res == Ok ? 0 : -EIO); | ||
414 | pd_req = elv_next_request(pd_queue); | 415 | pd_req = elv_next_request(pd_queue); |
415 | if (!pd_req) | 416 | if (!pd_req) |
416 | stop = 1; | 417 | stop = 1; |
@@ -477,7 +478,7 @@ static int pd_next_buf(void) | |||
477 | if (pd_count) | 478 | if (pd_count) |
478 | return 0; | 479 | return 0; |
479 | spin_lock_irqsave(&pd_lock, saved_flags); | 480 | spin_lock_irqsave(&pd_lock, saved_flags); |
480 | end_request(pd_req, 1); | 481 | __blk_end_request_cur(pd_req, 0); |
481 | pd_count = pd_req->current_nr_sectors; | 482 | pd_count = pd_req->current_nr_sectors; |
482 | pd_buf = pd_req->buffer; | 483 | pd_buf = pd_req->buffer; |
483 | spin_unlock_irqrestore(&pd_lock, saved_flags); | 484 | spin_unlock_irqrestore(&pd_lock, saved_flags); |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3e..3871e3586d6d 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,10 +750,10 @@ static int pf_ready(void) | |||
750 | 750 | ||
751 | static struct request_queue *pf_queue; | 751 | static struct request_queue *pf_queue; |
752 | 752 | ||
753 | static void pf_end_request(int uptodate) | 753 | static void pf_end_request(int err) |
754 | { | 754 | { |
755 | if (pf_req) { | 755 | if (pf_req) { |
756 | end_request(pf_req, uptodate); | 756 | __blk_end_request_cur(pf_req, err); |
757 | pf_req = NULL; | 757 | pf_req = NULL; |
758 | } | 758 | } |
759 | } | 759 | } |
@@ -773,7 +773,7 @@ repeat: | |||
773 | pf_count = pf_req->current_nr_sectors; | 773 | pf_count = pf_req->current_nr_sectors; |
774 | 774 | ||
775 | if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { | 775 | if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { |
776 | pf_end_request(0); | 776 | pf_end_request(-EIO); |
777 | goto repeat; | 777 | goto repeat; |
778 | } | 778 | } |
779 | 779 | ||
@@ -788,7 +788,7 @@ repeat: | |||
788 | pi_do_claimed(pf_current->pi, do_pf_write); | 788 | pi_do_claimed(pf_current->pi, do_pf_write); |
789 | else { | 789 | else { |
790 | pf_busy = 0; | 790 | pf_busy = 0; |
791 | pf_end_request(0); | 791 | pf_end_request(-EIO); |
792 | goto repeat; | 792 | goto repeat; |
793 | } | 793 | } |
794 | } | 794 | } |
@@ -805,7 +805,7 @@ static int pf_next_buf(void) | |||
805 | return 1; | 805 | return 1; |
806 | if (!pf_count) { | 806 | if (!pf_count) { |
807 | spin_lock_irqsave(&pf_spin_lock, saved_flags); | 807 | spin_lock_irqsave(&pf_spin_lock, saved_flags); |
808 | pf_end_request(1); | 808 | pf_end_request(0); |
809 | pf_req = elv_next_request(pf_queue); | 809 | pf_req = elv_next_request(pf_queue); |
810 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); | 810 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); |
811 | if (!pf_req) | 811 | if (!pf_req) |
@@ -816,12 +816,12 @@ static int pf_next_buf(void) | |||
816 | return 0; | 816 | return 0; |
817 | } | 817 | } |
818 | 818 | ||
819 | static inline void next_request(int success) | 819 | static inline void next_request(int err) |
820 | { | 820 | { |
821 | unsigned long saved_flags; | 821 | unsigned long saved_flags; |
822 | 822 | ||
823 | spin_lock_irqsave(&pf_spin_lock, saved_flags); | 823 | spin_lock_irqsave(&pf_spin_lock, saved_flags); |
824 | pf_end_request(success); | 824 | pf_end_request(err); |
825 | pf_busy = 0; | 825 | pf_busy = 0; |
826 | do_pf_request(pf_queue); | 826 | do_pf_request(pf_queue); |
827 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); | 827 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); |
@@ -844,7 +844,7 @@ static void do_pf_read_start(void) | |||
844 | pi_do_claimed(pf_current->pi, do_pf_read_start); | 844 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
845 | return; | 845 | return; |
846 | } | 846 | } |
847 | next_request(0); | 847 | next_request(-EIO); |
848 | return; | 848 | return; |
849 | } | 849 | } |
850 | pf_mask = STAT_DRQ; | 850 | pf_mask = STAT_DRQ; |
@@ -863,7 +863,7 @@ static void do_pf_read_drq(void) | |||
863 | pi_do_claimed(pf_current->pi, do_pf_read_start); | 863 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
864 | return; | 864 | return; |
865 | } | 865 | } |
866 | next_request(0); | 866 | next_request(-EIO); |
867 | return; | 867 | return; |
868 | } | 868 | } |
869 | pi_read_block(pf_current->pi, pf_buf, 512); | 869 | pi_read_block(pf_current->pi, pf_buf, 512); |
@@ -871,7 +871,7 @@ static void do_pf_read_drq(void) | |||
871 | break; | 871 | break; |
872 | } | 872 | } |
873 | pi_disconnect(pf_current->pi); | 873 | pi_disconnect(pf_current->pi); |
874 | next_request(1); | 874 | next_request(0); |
875 | } | 875 | } |
876 | 876 | ||
877 | static void do_pf_write(void) | 877 | static void do_pf_write(void) |
@@ -890,7 +890,7 @@ static void do_pf_write_start(void) | |||
890 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 890 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
891 | return; | 891 | return; |
892 | } | 892 | } |
893 | next_request(0); | 893 | next_request(-EIO); |
894 | return; | 894 | return; |
895 | } | 895 | } |
896 | 896 | ||
@@ -903,7 +903,7 @@ static void do_pf_write_start(void) | |||
903 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 903 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
904 | return; | 904 | return; |
905 | } | 905 | } |
906 | next_request(0); | 906 | next_request(-EIO); |
907 | return; | 907 | return; |
908 | } | 908 | } |
909 | pi_write_block(pf_current->pi, pf_buf, 512); | 909 | pi_write_block(pf_current->pi, pf_buf, 512); |
@@ -923,11 +923,11 @@ static void do_pf_write_done(void) | |||
923 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 923 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
924 | return; | 924 | return; |
925 | } | 925 | } |
926 | next_request(0); | 926 | next_request(-EIO); |
927 | return; | 927 | return; |
928 | } | 928 | } |
929 | pi_disconnect(pf_current->pi); | 929 | pi_disconnect(pf_current->pi); |
930 | next_request(1); | 930 | next_request(0); |
931 | } | 931 | } |
932 | 932 | ||
933 | static int __init pf_init(void) | 933 | static int __init pf_init(void) |
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb9212..d23b54bc2f50 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, | |||
158 | if (res) { | 158 | if (res) { |
159 | dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, | 159 | dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, |
160 | __LINE__, op, res); | 160 | __LINE__, op, res); |
161 | end_request(req, 0); | 161 | __blk_end_request_cur(req, -EIO); |
162 | return 0; | 162 | return 0; |
163 | } | 163 | } |
164 | 164 | ||
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev, | |||
180 | if (res) { | 180 | if (res) { |
181 | dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", | 181 | dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", |
182 | __func__, __LINE__, res); | 182 | __func__, __LINE__, res); |
183 | end_request(req, 0); | 183 | __blk_end_request_cur(req, -EIO); |
184 | return 0; | 184 | return 0; |
185 | } | 185 | } |
186 | 186 | ||
@@ -205,7 +205,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, | |||
205 | break; | 205 | break; |
206 | } else { | 206 | } else { |
207 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); | 207 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); |
208 | end_request(req, 0); | 208 | __blk_end_request_cur(req, -EIO); |
209 | continue; | 209 | continue; |
210 | } | 210 | } |
211 | } | 211 | } |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc3856937..6544a7b06bf0 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -532,39 +532,39 @@ static void redo_fd_request(struct request_queue *q) | |||
532 | 532 | ||
533 | fs = req->rq_disk->private_data; | 533 | fs = req->rq_disk->private_data; |
534 | if (req->sector < 0 || req->sector >= fs->total_secs) { | 534 | if (req->sector < 0 || req->sector >= fs->total_secs) { |
535 | end_request(req, 0); | 535 | __blk_end_request_cur(req, -EIO); |
536 | continue; | 536 | continue; |
537 | } | 537 | } |
538 | if (req->current_nr_sectors == 0) { | 538 | if (req->current_nr_sectors == 0) { |
539 | end_request(req, 1); | 539 | __blk_end_request_cur(req, 0); |
540 | continue; | 540 | continue; |
541 | } | 541 | } |
542 | if (!fs->disk_in) { | 542 | if (!fs->disk_in) { |
543 | end_request(req, 0); | 543 | __blk_end_request_cur(req, -EIO); |
544 | continue; | 544 | continue; |
545 | } | 545 | } |
546 | if (rq_data_dir(req) == WRITE) { | 546 | if (rq_data_dir(req) == WRITE) { |
547 | if (fs->write_protected) { | 547 | if (fs->write_protected) { |
548 | end_request(req, 0); | 548 | __blk_end_request_cur(req, -EIO); |
549 | continue; | 549 | continue; |
550 | } | 550 | } |
551 | } | 551 | } |
552 | switch (rq_data_dir(req)) { | 552 | switch (rq_data_dir(req)) { |
553 | case WRITE: | 553 | case WRITE: |
554 | /* NOT IMPLEMENTED */ | 554 | /* NOT IMPLEMENTED */ |
555 | end_request(req, 0); | 555 | __blk_end_request_cur(req, -EIO); |
556 | break; | 556 | break; |
557 | case READ: | 557 | case READ: |
558 | if (floppy_read_sectors(fs, req->sector, | 558 | if (floppy_read_sectors(fs, req->sector, |
559 | req->current_nr_sectors, | 559 | req->current_nr_sectors, |
560 | req->buffer)) { | 560 | req->buffer)) { |
561 | end_request(req, 0); | 561 | __blk_end_request_cur(req, -EIO); |
562 | continue; | 562 | continue; |
563 | } | 563 | } |
564 | req->nr_sectors -= req->current_nr_sectors; | 564 | req->nr_sectors -= req->current_nr_sectors; |
565 | req->sector += req->current_nr_sectors; | 565 | req->sector += req->current_nr_sectors; |
566 | req->buffer += req->current_nr_sectors * 512; | 566 | req->buffer += req->current_nr_sectors * 512; |
567 | end_request(req, 1); | 567 | __blk_end_request_cur(req, 0); |
568 | break; | 568 | break; |
569 | } | 569 | } |
570 | } | 570 | } |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba0..5904f7b73c6e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -320,15 +320,15 @@ static void start_request(struct floppy_state *fs) | |||
320 | #endif | 320 | #endif |
321 | 321 | ||
322 | if (req->sector < 0 || req->sector >= fs->total_secs) { | 322 | if (req->sector < 0 || req->sector >= fs->total_secs) { |
323 | end_request(req, 0); | 323 | __blk_end_request_cur(req, -EIO); |
324 | continue; | 324 | continue; |
325 | } | 325 | } |
326 | if (req->current_nr_sectors == 0) { | 326 | if (req->current_nr_sectors == 0) { |
327 | end_request(req, 1); | 327 | __blk_end_request_cur(req, 0); |
328 | continue; | 328 | continue; |
329 | } | 329 | } |
330 | if (fs->ejected) { | 330 | if (fs->ejected) { |
331 | end_request(req, 0); | 331 | __blk_end_request_cur(req, -EIO); |
332 | continue; | 332 | continue; |
333 | } | 333 | } |
334 | 334 | ||
@@ -336,7 +336,7 @@ static void start_request(struct floppy_state *fs) | |||
336 | if (fs->write_prot < 0) | 336 | if (fs->write_prot < 0) |
337 | fs->write_prot = swim3_readbit(fs, WRITE_PROT); | 337 | fs->write_prot = swim3_readbit(fs, WRITE_PROT); |
338 | if (fs->write_prot) { | 338 | if (fs->write_prot) { |
339 | end_request(req, 0); | 339 | __blk_end_request_cur(req, -EIO); |
340 | continue; | 340 | continue; |
341 | } | 341 | } |
342 | } | 342 | } |
@@ -508,7 +508,7 @@ static void act(struct floppy_state *fs) | |||
508 | case do_transfer: | 508 | case do_transfer: |
509 | if (fs->cur_cyl != fs->req_cyl) { | 509 | if (fs->cur_cyl != fs->req_cyl) { |
510 | if (fs->retries > 5) { | 510 | if (fs->retries > 5) { |
511 | end_request(fd_req, 0); | 511 | __blk_end_request_cur(fd_req, -EIO); |
512 | fs->state = idle; | 512 | fs->state = idle; |
513 | return; | 513 | return; |
514 | } | 514 | } |
@@ -540,7 +540,7 @@ static void scan_timeout(unsigned long data) | |||
540 | out_8(&sw->intr_enable, 0); | 540 | out_8(&sw->intr_enable, 0); |
541 | fs->cur_cyl = -1; | 541 | fs->cur_cyl = -1; |
542 | if (fs->retries > 5) { | 542 | if (fs->retries > 5) { |
543 | end_request(fd_req, 0); | 543 | __blk_end_request_cur(fd_req, -EIO); |
544 | fs->state = idle; | 544 | fs->state = idle; |
545 | start_request(fs); | 545 | start_request(fs); |
546 | } else { | 546 | } else { |
@@ -559,7 +559,7 @@ static void seek_timeout(unsigned long data) | |||
559 | out_8(&sw->select, RELAX); | 559 | out_8(&sw->select, RELAX); |
560 | out_8(&sw->intr_enable, 0); | 560 | out_8(&sw->intr_enable, 0); |
561 | printk(KERN_ERR "swim3: seek timeout\n"); | 561 | printk(KERN_ERR "swim3: seek timeout\n"); |
562 | end_request(fd_req, 0); | 562 | __blk_end_request_cur(fd_req, -EIO); |
563 | fs->state = idle; | 563 | fs->state = idle; |
564 | start_request(fs); | 564 | start_request(fs); |
565 | } | 565 | } |
@@ -583,7 +583,7 @@ static void settle_timeout(unsigned long data) | |||
583 | return; | 583 | return; |
584 | } | 584 | } |
585 | printk(KERN_ERR "swim3: seek settle timeout\n"); | 585 | printk(KERN_ERR "swim3: seek settle timeout\n"); |
586 | end_request(fd_req, 0); | 586 | __blk_end_request_cur(fd_req, -EIO); |
587 | fs->state = idle; | 587 | fs->state = idle; |
588 | start_request(fs); | 588 | start_request(fs); |
589 | } | 589 | } |
@@ -615,7 +615,7 @@ static void xfer_timeout(unsigned long data) | |||
615 | fd_req->current_nr_sectors -= s; | 615 | fd_req->current_nr_sectors -= s; |
616 | printk(KERN_ERR "swim3: timeout %sing sector %ld\n", | 616 | printk(KERN_ERR "swim3: timeout %sing sector %ld\n", |
617 | (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); | 617 | (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); |
618 | end_request(fd_req, 0); | 618 | __blk_end_request_cur(fd_req, -EIO); |
619 | fs->state = idle; | 619 | fs->state = idle; |
620 | start_request(fs); | 620 | start_request(fs); |
621 | } | 621 | } |
@@ -646,7 +646,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
646 | printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); | 646 | printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); |
647 | fs->cur_cyl = -1; | 647 | fs->cur_cyl = -1; |
648 | if (fs->retries > 5) { | 648 | if (fs->retries > 5) { |
649 | end_request(fd_req, 0); | 649 | __blk_end_request_cur(fd_req, -EIO); |
650 | fs->state = idle; | 650 | fs->state = idle; |
651 | start_request(fs); | 651 | start_request(fs); |
652 | } else { | 652 | } else { |
@@ -731,7 +731,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
731 | printk("swim3: error %sing block %ld (err=%x)\n", | 731 | printk("swim3: error %sing block %ld (err=%x)\n", |
732 | rq_data_dir(fd_req) == WRITE? "writ": "read", | 732 | rq_data_dir(fd_req) == WRITE? "writ": "read", |
733 | (long)fd_req->sector, err); | 733 | (long)fd_req->sector, err); |
734 | end_request(fd_req, 0); | 734 | __blk_end_request_cur(fd_req, -EIO); |
735 | fs->state = idle; | 735 | fs->state = idle; |
736 | } | 736 | } |
737 | } else { | 737 | } else { |
@@ -740,7 +740,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
740 | printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); | 740 | printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); |
741 | printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", | 741 | printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", |
742 | fs->state, rq_data_dir(fd_req), intr, err); | 742 | fs->state, rq_data_dir(fd_req), intr, err); |
743 | end_request(fd_req, 0); | 743 | __blk_end_request_cur(fd_req, -EIO); |
744 | fs->state = idle; | 744 | fs->state = idle; |
745 | start_request(fs); | 745 | start_request(fs); |
746 | break; | 746 | break; |
@@ -749,7 +749,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
749 | fd_req->current_nr_sectors -= fs->scount; | 749 | fd_req->current_nr_sectors -= fs->scount; |
750 | fd_req->buffer += fs->scount * 512; | 750 | fd_req->buffer += fs->scount * 512; |
751 | if (fd_req->current_nr_sectors <= 0) { | 751 | if (fd_req->current_nr_sectors <= 0) { |
752 | end_request(fd_req, 1); | 752 | __blk_end_request_cur(fd_req, 0); |
753 | fs->state = idle; | 753 | fs->state = idle; |
754 | } else { | 754 | } else { |
755 | fs->req_sector += fs->scount; | 755 | fs->req_sector += fs->scount; |
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98b..6f6ad82ec0c0 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -314,21 +314,22 @@ static void do_xd_request (struct request_queue * q) | |||
314 | int retry; | 314 | int retry; |
315 | 315 | ||
316 | if (!blk_fs_request(req)) { | 316 | if (!blk_fs_request(req)) { |
317 | end_request(req, 0); | 317 | __blk_end_request_cur(req, -EIO); |
318 | continue; | 318 | continue; |
319 | } | 319 | } |
320 | if (block + count > get_capacity(req->rq_disk)) { | 320 | if (block + count > get_capacity(req->rq_disk)) { |
321 | end_request(req, 0); | 321 | __blk_end_request_cur(req, -EIO); |
322 | continue; | 322 | continue; |
323 | } | 323 | } |
324 | if (rw != READ && rw != WRITE) { | 324 | if (rw != READ && rw != WRITE) { |
325 | printk("do_xd_request: unknown request\n"); | 325 | printk("do_xd_request: unknown request\n"); |
326 | end_request(req, 0); | 326 | __blk_end_request_cur(req, -EIO); |
327 | continue; | 327 | continue; |
328 | } | 328 | } |
329 | for (retry = 0; (retry < XD_RETRIES) && !res; retry++) | 329 | for (retry = 0; (retry < XD_RETRIES) && !res; retry++) |
330 | res = xd_readwrite(rw, disk, req->buffer, block, count); | 330 | res = xd_readwrite(rw, disk, req->buffer, block, count); |
331 | end_request(req, res); /* wrap up, 0 = fail, 1 = success */ | 331 | /* wrap up, 0 = success, -errno = fail */ |
332 | __blk_end_request_cur(req, res); | ||
332 | } | 333 | } |
333 | } | 334 | } |
334 | 335 | ||
@@ -418,7 +419,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ | |||
418 | printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); | 419 | printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); |
419 | xd_recalibrate(drive); | 420 | xd_recalibrate(drive); |
420 | spin_lock_irq(&xd_lock); | 421 | spin_lock_irq(&xd_lock); |
421 | return (0); | 422 | return -EIO; |
422 | case 2: | 423 | case 2: |
423 | if (sense[0] & 0x30) { | 424 | if (sense[0] & 0x30) { |
424 | printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); | 425 | printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); |
@@ -439,7 +440,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ | |||
439 | else | 440 | else |
440 | printk(" - no valid disk address\n"); | 441 | printk(" - no valid disk address\n"); |
441 | spin_lock_irq(&xd_lock); | 442 | spin_lock_irq(&xd_lock); |
442 | return (0); | 443 | return -EIO; |
443 | } | 444 | } |
444 | if (xd_dma_buffer) | 445 | if (xd_dma_buffer) |
445 | for (i=0; i < (temp * 0x200); i++) | 446 | for (i=0; i < (temp * 0x200); i++) |
@@ -448,7 +449,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ | |||
448 | count -= temp, buffer += temp * 0x200, block += temp; | 449 | count -= temp, buffer += temp * 0x200, block += temp; |
449 | } | 450 | } |
450 | spin_lock_irq(&xd_lock); | 451 | spin_lock_irq(&xd_lock); |
451 | return (1); | 452 | return 0; |
452 | } | 453 | } |
453 | 454 | ||
454 | /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ | 455 | /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index cd6cfe3b51e1..b4564479f641 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -302,7 +302,7 @@ static void do_blkif_request(struct request_queue *rq) | |||
302 | while ((req = elv_next_request(rq)) != NULL) { | 302 | while ((req = elv_next_request(rq)) != NULL) { |
303 | info = req->rq_disk->private_data; | 303 | info = req->rq_disk->private_data; |
304 | if (!blk_fs_request(req)) { | 304 | if (!blk_fs_request(req)) { |
305 | end_request(req, 0); | 305 | __blk_end_request_cur(req, -EIO); |
306 | continue; | 306 | continue; |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a93..b1e1d7e5ab1e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -466,7 +466,7 @@ struct request *ace_get_next_request(struct request_queue * q) | |||
466 | while ((req = elv_next_request(q)) != NULL) { | 466 | while ((req = elv_next_request(q)) != NULL) { |
467 | if (blk_fs_request(req)) | 467 | if (blk_fs_request(req)) |
468 | break; | 468 | break; |
469 | end_request(req, 0); | 469 | __blk_end_request_cur(req, -EIO); |
470 | } | 470 | } |
471 | return req; | 471 | return req; |
472 | } | 472 | } |
@@ -494,7 +494,7 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
494 | 494 | ||
495 | /* Drop all pending requests */ | 495 | /* Drop all pending requests */ |
496 | while ((req = elv_next_request(ace->queue)) != NULL) | 496 | while ((req = elv_next_request(ace->queue)) != NULL) |
497 | end_request(req, 0); | 497 | __blk_end_request_cur(req, -EIO); |
498 | 498 | ||
499 | /* Drop back to IDLE state and notify waiters */ | 499 | /* Drop back to IDLE state and notify waiters */ |
500 | ace->fsm_state = ACE_FSM_STATE_IDLE; | 500 | ace->fsm_state = ACE_FSM_STATE_IDLE; |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd3119..b66ad58a3c38 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -77,7 +77,7 @@ static void do_z2_request(struct request_queue *q) | |||
77 | if (start + len > z2ram_size) { | 77 | if (start + len > z2ram_size) { |
78 | printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", | 78 | printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", |
79 | req->sector, req->current_nr_sectors); | 79 | req->sector, req->current_nr_sectors); |
80 | end_request(req, 0); | 80 | __blk_end_request_cur(req, -EIO); |
81 | continue; | 81 | continue; |
82 | } | 82 | } |
83 | while (len) { | 83 | while (len) { |
@@ -93,7 +93,7 @@ static void do_z2_request(struct request_queue *q) | |||
93 | start += size; | 93 | start += size; |
94 | len -= size; | 94 | len -= size; |
95 | } | 95 | } |
96 | end_request(req, 1); | 96 | __blk_end_request_cur(req, 0); |
97 | } | 97 | } |
98 | } | 98 | } |
99 | 99 | ||
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index fee9a9e83fc9..cab2b1fb2fe7 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -654,17 +654,17 @@ static void gdrom_request(struct request_queue *rq) | |||
654 | while ((req = elv_next_request(rq)) != NULL) { | 654 | while ((req = elv_next_request(rq)) != NULL) { |
655 | if (!blk_fs_request(req)) { | 655 | if (!blk_fs_request(req)) { |
656 | printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); | 656 | printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); |
657 | end_request(req, 0); | 657 | __blk_end_request_cur(req, -EIO); |
658 | } | 658 | } |
659 | if (rq_data_dir(req) != READ) { | 659 | if (rq_data_dir(req) != READ) { |
660 | printk(KERN_NOTICE "GDROM: Read only device -"); | 660 | printk(KERN_NOTICE "GDROM: Read only device -"); |
661 | printk(" write request ignored\n"); | 661 | printk(" write request ignored\n"); |
662 | end_request(req, 0); | 662 | __blk_end_request_cur(req, -EIO); |
663 | } | 663 | } |
664 | if (req->nr_sectors) | 664 | if (req->nr_sectors) |
665 | gdrom_request_handler_dma(req); | 665 | gdrom_request_handler_dma(req); |
666 | else | 666 | else |
667 | end_request(req, 0); | 667 | __blk_end_request_cur(req, -EIO); |
668 | } | 668 | } |
669 | } | 669 | } |
670 | 670 | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..221317e6a006 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -923,7 +923,7 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
923 | break; | 923 | break; |
924 | } | 924 | } |
925 | } else | 925 | } else |
926 | end_request(req, 0); | 926 | __blk_end_request_cur(req, -EIO); |
927 | } | 927 | } |
928 | }; | 928 | }; |
929 | 929 | ||
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb1..76c4c8d13073 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -54,33 +54,33 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
54 | 54 | ||
55 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | 55 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && |
56 | req->cmd[0] == REQ_LB_OP_DISCARD) | 56 | req->cmd[0] == REQ_LB_OP_DISCARD) |
57 | return !tr->discard(dev, block, nsect); | 57 | return tr->discard(dev, block, nsect); |
58 | 58 | ||
59 | if (!blk_fs_request(req)) | 59 | if (!blk_fs_request(req)) |
60 | return 0; | 60 | return -EIO; |
61 | 61 | ||
62 | if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) | 62 | if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) |
63 | return 0; | 63 | return -EIO; |
64 | 64 | ||
65 | switch(rq_data_dir(req)) { | 65 | switch(rq_data_dir(req)) { |
66 | case READ: | 66 | case READ: |
67 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 67 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
68 | if (tr->readsect(dev, block, buf)) | 68 | if (tr->readsect(dev, block, buf)) |
69 | return 0; | 69 | return -EIO; |
70 | return 1; | 70 | return 0; |
71 | 71 | ||
72 | case WRITE: | 72 | case WRITE: |
73 | if (!tr->writesect) | 73 | if (!tr->writesect) |
74 | return 0; | 74 | return -EIO; |
75 | 75 | ||
76 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 76 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
77 | if (tr->writesect(dev, block, buf)) | 77 | if (tr->writesect(dev, block, buf)) |
78 | return 0; | 78 | return -EIO; |
79 | return 1; | 79 | return 0; |
80 | 80 | ||
81 | default: | 81 | default: |
82 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); | 82 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); |
83 | return 0; | 83 | return -EIO; |
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
@@ -96,7 +96,7 @@ static int mtd_blktrans_thread(void *arg) | |||
96 | while (!kthread_should_stop()) { | 96 | while (!kthread_should_stop()) { |
97 | struct request *req; | 97 | struct request *req; |
98 | struct mtd_blktrans_dev *dev; | 98 | struct mtd_blktrans_dev *dev; |
99 | int res = 0; | 99 | int res; |
100 | 100 | ||
101 | req = elv_next_request(rq); | 101 | req = elv_next_request(rq); |
102 | 102 | ||
@@ -119,7 +119,7 @@ static int mtd_blktrans_thread(void *arg) | |||
119 | 119 | ||
120 | spin_lock_irq(rq->queue_lock); | 120 | spin_lock_irq(rq->queue_lock); |
121 | 121 | ||
122 | end_request(req, res); | 122 | __blk_end_request_cur(req, res); |
123 | } | 123 | } |
124 | spin_unlock_irq(rq->queue_lock); | 124 | spin_unlock_irq(rq->queue_lock); |
125 | 125 | ||
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e8548..09617884a50b 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -192,25 +192,25 @@ static void jsfd_do_request(struct request_queue *q) | |||
192 | size_t len = req->current_nr_sectors << 9; | 192 | size_t len = req->current_nr_sectors << 9; |
193 | 193 | ||
194 | if ((offset + len) > jdp->dsize) { | 194 | if ((offset + len) > jdp->dsize) { |
195 | end_request(req, 0); | 195 | __blk_end_request_cur(req, -EIO); |
196 | continue; | 196 | continue; |
197 | } | 197 | } |
198 | 198 | ||
199 | if (rq_data_dir(req) != READ) { | 199 | if (rq_data_dir(req) != READ) { |
200 | printk(KERN_ERR "jsfd: write\n"); | 200 | printk(KERN_ERR "jsfd: write\n"); |
201 | end_request(req, 0); | 201 | __blk_end_request_cur(req, -EIO); |
202 | continue; | 202 | continue; |
203 | } | 203 | } |
204 | 204 | ||
205 | if ((jdp->dbase & 0xff000000) != 0x20000000) { | 205 | if ((jdp->dbase & 0xff000000) != 0x20000000) { |
206 | printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); | 206 | printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); |
207 | end_request(req, 0); | 207 | __blk_end_request_cur(req, -EIO); |
208 | continue; | 208 | continue; |
209 | } | 209 | } |
210 | 210 | ||
211 | jsfd_read(req->buffer, jdp->dbase + offset, len); | 211 | jsfd_read(req->buffer, jdp->dbase + offset, len); |
212 | 212 | ||
213 | end_request(req, 1); | 213 | __blk_end_request_cur(req, 0); |
214 | } | 214 | } |
215 | } | 215 | } |
216 | 216 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e33c8356b3da..cfeb3c2feb27 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -845,9 +845,8 @@ extern unsigned int blk_rq_cur_bytes(struct request *rq); | |||
845 | * blk_update_request() completes given number of bytes and updates | 845 | * blk_update_request() completes given number of bytes and updates |
846 | * the request without completing it. | 846 | * the request without completing it. |
847 | * | 847 | * |
848 | * blk_end_request() and friends. __blk_end_request() and | 848 | * blk_end_request() and friends. __blk_end_request() must be called |
849 | * end_request() must be called with the request queue spinlock | 849 | * with the request queue spinlock acquired. |
850 | * acquired. | ||
851 | * | 850 | * |
852 | * Several drivers define their own end_request and call | 851 | * Several drivers define their own end_request and call |
853 | * blk_end_request() for parts of the original function. | 852 | * blk_end_request() for parts of the original function. |
@@ -899,6 +898,19 @@ static inline void blk_end_request_all(struct request *rq, int error) | |||
899 | } | 898 | } |
900 | 899 | ||
901 | /** | 900 | /** |
901 | * blk_end_request_cur - Helper function to finish the current request chunk. | ||
902 | * @rq: the request to finish the current chunk for | ||
903 | * @error: %0 for success, < %0 for error | ||
904 | * | ||
905 | * Description: | ||
906 | * Complete the current consecutively mapped chunk from @rq. | ||
907 | */ | ||
908 | static inline void blk_end_request_cur(struct request *rq, int error) | ||
909 | { | ||
910 | blk_end_request(rq, error, rq->hard_cur_sectors << 9); | ||
911 | } | ||
912 | |||
913 | /** | ||
902 | * __blk_end_request - Helper function for drivers to complete the request. | 914 | * __blk_end_request - Helper function for drivers to complete the request. |
903 | * @rq: the request being processed | 915 | * @rq: the request being processed |
904 | * @error: %0 for success, < %0 for error | 916 | * @error: %0 for success, < %0 for error |
@@ -934,29 +946,17 @@ static inline void __blk_end_request_all(struct request *rq, int error) | |||
934 | } | 946 | } |
935 | 947 | ||
936 | /** | 948 | /** |
937 | * end_request - end I/O on the current segment of the request | 949 | * __blk_end_request_cur - Helper function to finish the current request chunk. |
938 | * @rq: the request being processed | 950 | * @rq: the request to finish the current chunk for |
939 | * @uptodate: error value or %0/%1 uptodate flag | 951 | * @error: %0 for success, < %0 for error |
940 | * | 952 | * |
941 | * Description: | 953 | * Description: |
942 | * Ends I/O on the current segment of a request. If that is the only | 954 | * Complete the current consecutively mapped chunk from @rq. Must |
943 | * remaining segment, the request is also completed and freed. | 955 | * be called with queue lock held. |
944 | * | 956 | */ |
945 | * This is a remnant of how older block drivers handled I/O completions. | 957 | static inline void __blk_end_request_cur(struct request *rq, int error) |
946 | * Modern drivers typically end I/O on the full request in one go, unless | ||
947 | * they have a residual value to account for. For that case this function | ||
948 | * isn't really useful, unless the residual just happens to be the | ||
949 | * full current segment. In other words, don't use this function in new | ||
950 | * code. Use blk_end_request() or __blk_end_request() to end a request. | ||
951 | **/ | ||
952 | static inline void end_request(struct request *rq, int uptodate) | ||
953 | { | 958 | { |
954 | int error = 0; | 959 | __blk_end_request(rq, error, rq->hard_cur_sectors << 9); |
955 | |||
956 | if (uptodate <= 0) | ||
957 | error = uptodate ? uptodate : -EIO; | ||
958 | |||
959 | __blk_end_bidi_request(rq, error, rq->hard_cur_sectors << 9, 0); | ||
960 | } | 960 | } |
961 | 961 | ||
962 | extern void blk_complete_request(struct request *); | 962 | extern void blk_complete_request(struct request *); |
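
Taken together with the driver changes above, the new helper makes a converted request function look roughly like the loops in swim.c or xd.c. The sketch below is hypothetical (the example_* names are not from the patch); elv_next_request(), blk_fs_request() and __blk_end_request_cur() are the calls the converted drivers use, and the request function is entered with the queue lock held, which is why the locked __ variant is the right one:

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Stand-in for the driver's real data transfer:
     * returns 0 on success or a negative errno on failure. */
    static int example_xfer(struct request *req)
    {
            /* ... move req->current_nr_sectors sectors to/from req->buffer ... */
            return 0;
    }

    static void example_do_request(struct request_queue *q)
    {
            struct request *req;

            while ((req = elv_next_request(q)) != NULL) {
                    if (!blk_fs_request(req)) {
                            __blk_end_request_cur(req, -EIO);
                            continue;
                    }
                    /* Complete only the current chunk; a partially finished
                     * request is handed back by elv_next_request() again. */
                    __blk_end_request_cur(req, example_xfer(req));
            }
    }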