Diffstat (limited to 'drivers/scsi/scsi_lib.c')

 -rw-r--r--   drivers/scsi/scsi_lib.c   87

 1 files changed, 29 insertions, 58 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..dd3f9d2b99f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	 * is invalid. Prevent the garbage from being misinterpreted
 	 * and prevent security leaks by zeroing out the excess data.
 	 */
-	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
-		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

 	if (resid)
-		*resid = req->data_len;
+		*resid = req->resid_len;
 	ret = req->errors;
  out:
 	blk_put_request(req);
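The hunk above only changes which field the residual is read from: the count that scsi_execute() hands back through its resid out-parameter now comes from req->resid_len rather than the old req->data_len. A minimal caller-side sketch, with the CDB, buffer size and timeout values purely hypothetical:

	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* hypothetical INQUIRY */
	unsigned char buf[96];
	int resid = 0;
	int result;

	result = scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf, sizeof(buf),
			      NULL, 30 * HZ, 3, 0, &resid);
	/* resid now holds req->resid_len: bytes the device did not transfer;
	 * any such tail of buf was zeroed by the memset shown above. */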
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 	 * to queue the remainder of them.
 	 */
 	if (blk_end_request(req, error, bytes)) {
-		int leftover = (req->hard_nr_sectors << 9);
-
-		if (blk_pc_request(req))
-			leftover = req->data_len;
-
 		/* kill remainder if no retrys */
 		if (error && scsi_noretry_cmd(cmd))
-			blk_end_request(req, error, leftover);
+			blk_end_request_all(req, error);
 		else {
 			if (requeue) {
 				/*
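blk_end_request_all() completes whatever is still outstanding on the request, which is why the hand-computed leftover (hard_nr_sectors << 9, or data_len for a BLOCK_PC request) can be dropped. For a non-bidi request the new call is roughly a shorthand for the following, shown only as a sketch of the helper's effect:

	/* finish every remaining byte of req with the given error */
	blk_end_request(req, error, blk_rq_bytes(req));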
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 EXPORT_SYMBOL(scsi_release_buffers);

 /*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
-{
-	struct request *req = cmd->request;
-	unsigned int dlen = req->data_len;
-	unsigned int next_dlen = req->next_rq->data_len;
-
-	req->data_len = scsi_out(cmd)->resid;
-	req->next_rq->data_len = scsi_in(cmd)->resid;
-
-	/* The req and req->next_rq have not been completed */
-	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-
-	scsi_release_buffers(cmd);
-
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	scsi_next_command(cmd);
-}
-
-/*
  * Function: scsi_io_completion()
  *
  * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	int this_count;
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			if (!sense_deferred)
 				error = -EIO;
 		}
+
+		req->resid_len = scsi_get_resid(cmd);
+
 		if (scsi_bidi_cmnd(cmd)) {
-			/* will also release_buffers */
-			scsi_end_bidi_request(cmd);
+			/*
+			 * Bidi commands Must be complete as a whole,
+			 * both sides at once.
+			 */
+			req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+			blk_end_request_all(req, 0);
+
+			scsi_release_buffers(cmd);
+			scsi_next_command(cmd);
 			return;
 		}
-		req->data_len = scsi_get_resid(cmd);
 	}

 	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
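With scsi_end_bidi_request() folded in here, the residuals of a bidirectional command are published directly on the request pair and both sides are finished by the single blk_end_request_all() call. An upper layer that issued the bidi pair would pick the residuals up roughly like this (a sketch; only the field names come from this patch):

	unsigned int out_resid = req->resid_len;		/* data-out side */
	unsigned int in_resid  = req->next_rq->resid_len;	/* data-in side */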
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	 * Next deal with any sectors which we were able to correctly
 	 * handle.
 	 */
-	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
 				      "%d bytes done.\n",
-				      req->nr_sectors, good_bytes));
+				      blk_rq_sectors(req), good_bytes));

 	/*
 	 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	 */
 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
 		return;
-	this_count = blk_rq_bytes(req);

 	error = -EIO;

@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			if (driver_byte(result) & DRIVER_SENSE)
 				scsi_print_sense("", cmd);
 		}
-		blk_end_request(req, -EIO, blk_rq_bytes(req));
+		blk_end_request_all(req, -EIO);
 		scsi_next_command(cmd);
 		break;
 	case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
 	BUG_ON(count > sdb->table.nents);
 	sdb->table.nents = count;
-	if (blk_pc_request(req))
-		sdb->length = req->data_len;
-	else
-		sdb->length = req->nr_sectors << 9;
+	sdb->length = blk_rq_bytes(req);
 	return BLKPREP_OK;
 }

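The branch removed here was the only place scsi_init_sgtable() cared about the request type when sizing the data buffer; blk_rq_bytes() now yields the total byte count in both cases. Side by side, purely as a restatement of this hunk (the len variable is illustrative):

	unsigned int len;

	if (blk_pc_request(req))
		len = req->data_len;		/* BLOCK_PC: byte-accurate length */
	else
		len = req->nr_sectors << 9;	/* fs request: 512-byte sectors */

	/* after this patch both cases collapse to: */
	len = blk_rq_bytes(req);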
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 		if (unlikely(ret))
 			return ret;
 	} else {
-		BUG_ON(req->data_len);
-		BUG_ON(req->data);
+		BUG_ON(blk_rq_bytes(req));

 		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 		req->buffer = NULL;
 	}

 	cmd->cmd_len = req->cmd_len;
-	if (!req->data_len)
+	if (!blk_rq_bytes(req))
 		cmd->sc_data_direction = DMA_NONE;
 	else if (rq_data_dir(req) == WRITE)
 		cmd->sc_data_direction = DMA_TO_DEVICE;
 	else
 		cmd->sc_data_direction = DMA_FROM_DEVICE;

-	cmd->transfersize = req->data_len;
+	cmd->transfersize = blk_rq_bytes(req);
 	cmd->allowed = req->retries;
 	return BLKPREP_OK;
 }
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 		break;
 	case BLKPREP_DEFER:
 		/*
-		 * If we defer, the elv_next_request() returns NULL, but the
+		 * If we defer, the blk_peek_request() returns NULL, but the
 		 * queue must be restarted, so we plug here if no returning
 		 * command will automatically do that.
 		 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;

-	blkdev_dequeue_request(req);
+	blk_start_request(req);

 	if (unlikely(cmd == NULL)) {
 		printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)

 	if (!sdev) {
 		printk("scsi: killing requests for dead queue\n");
-		while ((req = elv_next_request(q)) != NULL)
+		while ((req = blk_peek_request(q)) != NULL)
 			scsi_kill_request(req, q);
 		return;
 	}
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * that the request is fully prepared even if we cannot
 		 * accept it.
 		 */
-		req = elv_next_request(q);
+		req = blk_peek_request(q);
 		if (!req || !scsi_dev_queue_ready(q, sdev))
 			break;

@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * Remove the request from the request list.
 		 */
 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blkdev_dequeue_request(req);
+			blk_start_request(req);
 		sdev->device_busy++;

 		spin_unlock(q->queue_lock);
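The preceding elv_next_request()/blkdev_dequeue_request() hunks are one API swap: blk_peek_request() looks at the next prepared request without dequeueing it, and blk_start_request() dequeues it and starts it once the driver commits to it. A condensed sketch of the resulting request_fn pattern, with the readiness and dispatch helpers as hypothetical placeholders rather than SCSI code:

	static void example_request_fn(struct request_queue *q)
	{
		struct request *req;

		/* peek without dequeueing; NULL means nothing is ready */
		while ((req = blk_peek_request(q)) != NULL) {
			if (!device_can_accept(req))	/* hypothetical check */
				break;
			/* commit: dequeue the request and start its timeout */
			blk_start_request(req);
			dispatch_to_hardware(req);	/* hypothetical dispatch */
		}
	}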