author:    Christoph Hellwig <hch@lst.de>	2014-05-01 10:51:03 -0400
committer: Christoph Hellwig <hch@lst.de>	2014-05-19 06:33:41 -0400
commit:    bc85dc500f9df9b2eec15077e5046672c46adeaa (patch)
tree:      66441e48e4100b1ee9a769a642800eaf77933634
parent:    c682adf3e1176095a665716a0b62fead8f4b8f5c (diff)
scsi: remove scsi_end_request
By folding scsi_end_request into its only caller we can significantly clean
up the completion logic.  We can now use simple goto labels so that there is
a single place to finish or requeue a command, instead of the previous
convoluted logic.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Nicholas Bellinger <nab@linux-iscsi.org>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Reviewed-by: Hannes Reinecke <hare@suse.de>
-rw-r--r--	drivers/scsi/scsi_lib.c	| 114
1 file changed, 32 insertions(+), 82 deletions(-)
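To illustrate the idiom the patch moves to, here is a small stand-alone C sketch of the "single finish-or-requeue point via goto labels" pattern. This is not kernel code: fake_request, complete_bytes, finish_request and requeue_request are hypothetical stand-ins for the block-layer and SCSI midlayer calls (blk_end_request, scsi_next_command, scsi_requeue_command) that the real scsi_io_completion() uses in the diff below.

/*
 * Stand-alone sketch (not kernel code) of the pattern this patch adopts:
 * instead of a helper that both completes bytes and decides whether to
 * requeue, returning a possibly stale pointer, the caller makes every
 * decision itself and funnels all "we are done" exits through one label.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	int remaining;	/* bytes still outstanding */
	int error;	/* 0 on success */
	bool retryable;	/* may the leftover bytes be retried? */
};

/* Complete 'done' bytes; return true if bytes are still left over. */
static bool complete_bytes(struct fake_request *req, int done)
{
	req->remaining -= done;
	return req->remaining > 0;
}

static void finish_request(struct fake_request *req)
{
	printf("finished, error=%d\n", req->error);
}

static void requeue_request(struct fake_request *req)
{
	printf("requeued, %d bytes left\n", req->remaining);
}

static void io_completion(struct fake_request *req, int good_bytes)
{
	if (!complete_bytes(req, good_bytes))
		goto next_command;		/* whole request finished */

	if (req->error && !req->retryable) {
		req->remaining = 0;		/* kill the remainder */
		goto next_command;
	}

	/* Leftovers that may be retried: put them back on the queue. */
	requeue_request(req);
	return;

next_command:
	finish_request(req);
}

int main(void)
{
	struct fake_request req = { .remaining = 4096, .error = 0,
				    .retryable = true };

	io_completion(&req, 1024);	/* partial completion -> requeue */
	io_completion(&req, 3072);	/* remainder completes -> finish  */
	return 0;
}

In the actual patch below, the two labels are next_command (release the buffers and kick off the next command) and requeue inside the ACTION_REPREP case; the various completion paths in scsi_io_completion() now jump to one of them instead of calling scsi_end_request().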
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f416b5932a45..045822befad9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -512,66 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 		scsi_run_queue(sdev->request_queue);
 }
 
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *		of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd	 - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *		requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- *
- *		We are guaranteeing that the request queue will be goosed
- *		at some point during this call.
- * Notes:	If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-					  int bytes, int requeue)
-{
-	struct request_queue *q = cmd->device->request_queue;
-	struct request *req = cmd->request;
-
-	/*
-	 * If there are blocks left over at the end, set up the command
-	 * to queue the remainder of them.
-	 */
-	if (blk_end_request(req, error, bytes)) {
-		/* kill remainder if no retrys */
-		if (error && scsi_noretry_cmd(cmd))
-			blk_end_request_all(req, error);
-		else {
-			if (requeue) {
-				/*
-				 * Bleah.  Leftovers again.  Stick the
-				 * leftovers in the front of the
-				 * queue, and goose the queue again.
-				 */
-				scsi_release_buffers(cmd);
-				scsi_requeue_command(q, cmd);
-				cmd = NULL;
-			}
-			return cmd;
-		}
-	}
-
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	scsi_release_buffers(cmd);
-	scsi_next_command(cmd);
-	return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
@@ -717,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *
  * Returns:     Nothing
  *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
- *
- *		We must call scsi_end_request().  This will finish off
- *		the specified number of sectors.  If we are done, the
- *		command block will be released and the queue function
- *		will be goosed.  If we are not done then we have to
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *		are done, the command block will be released and the queue
+ *		function will be goosed.  If we are not done then we have to
  *		figure out what to do next:
  *
  *		a) We can call scsi_requeue_command().  The request
@@ -735,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *		   be used if we made forward progress, or if we want
  *		   to switch from READ(10) to READ(6) for example.
  *
- *		b) We can call scsi_queue_insert().  The request will
+ *		b) We can call __scsi_queue_insert().  The request will
  *		   be put back on the queue and retried using the same
  *		   command as before, possibly after a delay.
  *
@@ -794,6 +727,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 
 			scsi_release_buffers(cmd);
 			scsi_release_bidi_buffers(cmd);
+
 			blk_end_request_all(req, 0);
 
 			scsi_next_command(cmd);
@@ -833,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 
 	/*
-	 * A number of bytes were successfully read.  If there
-	 * are leftovers and there is some kind of error
-	 * (result != 0), retry the rest.
+	 * If we finished all bytes in the request we are done now.
 	 */
-	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
-		return;
+	if (!blk_end_request(req, error, good_bytes))
+		goto next_command;
+
+	/*
+	 * Kill remainder if no retrys.
+	 */
+	if (error && scsi_noretry_cmd(cmd)) {
+		blk_end_request_all(req, error);
+		goto next_command;
+	}
+
+	/*
+	 * If there had been no error, but we have leftover bytes in the
+	 * requeues just queue the command up again.
+	 */
+	if (result == 0)
+		goto requeue;
 
 	error = __scsi_error_from_host_byte(cmd, result);
 
@@ -966,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
-		scsi_release_buffers(cmd);
 		if (!(req->cmd_flags & REQ_QUIET)) {
 			if (description)
 				scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -976,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			scsi_print_sense("", cmd);
 			scsi_print_command(cmd);
 		}
-		if (blk_end_request_err(req, error))
-			scsi_requeue_command(q, cmd);
-		else
-			scsi_next_command(cmd);
-		break;
+		if (!blk_end_request_err(req, error))
+			goto next_command;
+		/*FALLTHRU*/
 	case ACTION_REPREP:
+	requeue:
 		/* Unprep the request and put it back at the head of the queue.
 		 * A new command will be prepared and issued.
 		 */
@@ -997,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 		break;
 	}
+	return;
+
+next_command:
+	scsi_release_buffers(cmd);
+	scsi_next_command(cmd);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,