Diffstat (limited to 'drivers/scsi')

-rw-r--r--  drivers/scsi/ide-scsi.c       8
-rw-r--r--  drivers/scsi/scsi_lib.c     171
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c   3

3 files changed, 38 insertions(+), 144 deletions(-)
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 02e91893064..db8bc20539e 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -919,8 +919,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 	}
 
 	/* kill current request */
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, 0);
+	if (__blk_end_request(req, -EIO, 0))
+		BUG();
 	if (blk_sense_request(req))
 		kfree(scsi->pc->buffer);
 	kfree(scsi->pc);
@@ -929,8 +929,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
 
 	/* now nuke the drive queue */
 	while ((req = elv_next_request(drive->queue))) {
-		blkdev_dequeue_request(req);
-		end_that_request_last(req, 0);
+		if (__blk_end_request(req, -EIO, 0))
+			BUG();
 	}
 
 	HWGROUP(drive)->rq = NULL;
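
Note: the two-step teardown (blkdev_dequeue_request() followed by end_that_request_last()) becomes a single __blk_end_request() call. A minimal sketch of the contract, assuming the 2.6.25-era block API (kill_request_sketch is an illustrative name, not part of the patch):

	/*
	 * __blk_end_request() finishes the data portion of a request and
	 * completes it in one step; the caller must hold the queue lock
	 * (blk_end_request() is the self-locking variant).  It returns 0
	 * once the request is fully completed and non-zero while data is
	 * still pending, so for a request being killed outright a
	 * non-zero return would indicate a bug -- hence the BUG() above.
	 */
	static void kill_request_sketch(struct request *req)
	{
		if (__blk_end_request(req, -EIO, 0))	/* 0 bytes: discard */
			BUG();
	}

The consolidated completion path also releases a tagged request and dequeues it if still queued, which is why the explicit dequeue above can simply go away.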
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4cf902efbdb..7c4c889c522 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -634,7 +634,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
  *		of upper level post-processing and scsi_io_completion).
  *
  * Arguments:   cmd      - command that is complete.
- *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
+ *              error    - 0 if I/O indicates success, < 0 for I/O error.
  *              bytes    - number of bytes of completed I/O
  *		requeue  - indicates whether we should requeue leftovers.
  *
@@ -649,26 +649,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
+static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 					  int bytes, int requeue)
 {
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
-	unsigned long flags;
 
 	/*
 	 * If there are blocks left over at the end, set up the command
 	 * to queue the remainder of them.
 	 */
-	if (end_that_request_chunk(req, uptodate, bytes)) {
+	if (blk_end_request(req, error, bytes)) {
 		int leftover = (req->hard_nr_sectors << 9);
 
 		if (blk_pc_request(req))
 			leftover = req->data_len;
 
 		/* kill remainder if no retrys */
-		if (!uptodate && blk_noretry_request(req))
-			end_that_request_chunk(req, 0, leftover);
+		if (error && blk_noretry_request(req))
+			blk_end_request(req, error, leftover);
 		else {
 			if (requeue) {
 				/*
@@ -683,14 +682,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 		}
 	}
 
-	add_disk_randomness(req->rq_disk);
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_rq_tagged(req))
-		blk_queue_end_tag(q, req);
-	end_that_request_last(req, uptodate);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	/*
 	 * This will goose the queue request function at the end, so we don't
 	 * need to worry about launching another command.
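
Note: these deleted lines are the heart of the conversion on the SCSI side. The disk-entropy accounting, tag release and final completion under the queue lock have not disappeared; they are now performed once, by blk_end_request() itself, inside the block layer. A sketch of that consolidated tail, reconstructed from the code removed above (blk_finish_request_sketch is an illustrative name, not a real kernel symbol):

	static void blk_finish_request_sketch(struct request *rq, int error)
	{
		struct request_queue *q = rq->q;
		unsigned long flags;

		add_disk_randomness(rq->rq_disk);

		spin_lock_irqsave(q->queue_lock, flags);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(q, rq);
		end_that_request_last(rq, error);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}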
@@ -739,138 +730,43 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
 	return index;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
-{
-	struct scsi_host_sg_pool *sgp;
-	struct scatterlist *sgl, *prev, *ret;
-	unsigned int index;
-	int this, left;
-
-	BUG_ON(!cmd->use_sg);
-
-	left = cmd->use_sg;
-	ret = prev = NULL;
-	do {
-		this = left;
-		if (this > SCSI_MAX_SG_SEGMENTS) {
-			this = SCSI_MAX_SG_SEGMENTS - 1;
-			index = SG_MEMPOOL_NR - 1;
-		} else
-			index = scsi_sgtable_index(this);
-
-		left -= this;
-
-		sgp = scsi_sg_pools + index;
-
-		sgl = mempool_alloc(sgp->pool, gfp_mask);
-		if (unlikely(!sgl))
-			goto enomem;
-
-		sg_init_table(sgl, sgp->size);
-
-		/*
-		 * first loop through, set initial index and return value
-		 */
-		if (!ret)
-			ret = sgl;
-
-		/*
-		 * chain previous sglist, if any. we know the previous
-		 * sglist must be the biggest one, or we would not have
-		 * ended up doing another loop.
-		 */
-		if (prev)
-			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
-
-		/*
-		 * if we have nothing left, mark the last segment as
-		 * end-of-list
-		 */
-		if (!left)
-			sg_mark_end(&sgl[this - 1]);
-
-		/*
-		 * don't allow subsequent mempool allocs to sleep, it would
-		 * violate the mempool principle.
-		 */
-		gfp_mask &= ~__GFP_WAIT;
-		gfp_mask |= __GFP_HIGH;
-		prev = sgl;
-	} while (left);
-
-	/*
-	 * ->use_sg may get modified after dma mapping has potentially
-	 * shrunk the number of segments, so keep a copy of it for free.
-	 */
-	cmd->__use_sg = cmd->use_sg;
-	return ret;
-enomem:
-	if (ret) {
-		/*
-		 * Free entries chained off ret. Since we were trying to
-		 * allocate another sglist, we know that all entries are of
-		 * the max size.
-		 */
-		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-		prev = ret;
-		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-		while ((sgl = sg_chain_ptr(ret)) != NULL) {
-			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-			mempool_free(sgl, sgp->pool);
-		}
-
-		mempool_free(prev, sgp->pool);
-	}
-	return NULL;
-}
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
+{
+	struct scsi_host_sg_pool *sgp;
+
+	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+	mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+	struct scsi_host_sg_pool *sgp;
+
+	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+	return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	int ret;
+
+	BUG_ON(!cmd->use_sg);
+
+	ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
+			       SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
+	if (unlikely(ret))
+		__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
+				scsi_sg_free);
+
+	cmd->request_buffer = cmd->sg_table.sgl;
+	return ret;
+}
 
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-	struct scatterlist *sgl = cmd->request_buffer;
-	struct scsi_host_sg_pool *sgp;
-
-	/*
-	 * if this is the biggest size sglist, check if we have
-	 * chained parts we need to free
-	 */
-	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-		unsigned short this, left;
-		struct scatterlist *next;
-		unsigned int index;
-
-		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-		while (left && next) {
-			sgl = next;
-			this = left;
-			if (this > SCSI_MAX_SG_SEGMENTS) {
-				this = SCSI_MAX_SG_SEGMENTS - 1;
-				index = SG_MEMPOOL_NR - 1;
-			} else
-				index = scsi_sgtable_index(this);
-
-			left -= this;
-
-			sgp = scsi_sg_pools + index;
-
-			if (left)
-				next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-			mempool_free(sgl, sgp->pool);
-		}
-
-		/*
-		 * Restore original, will be freed below
-		 */
-		sgl = cmd->request_buffer;
-		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-	} else
-		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-	mempool_free(sgl, sgp->pool);
+	__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
 EXPORT_SYMBOL(scsi_free_sgtable);
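
Note: the hand-rolled chaining, the enomem unwind and the __use_sg bookkeeping are all replaced by the generic helpers in lib/scatterlist.c; scsi_sg_alloc()/scsi_sg_free() only translate a chunk size into the right mempool. A self-contained sketch of the callback contract, assuming the 2.6.25 __sg_alloc_table()/__sg_free_table() API (the demo_* names are hypothetical, and plain kmalloc stands in for the SCSI mempools):

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Called once per chunk of at most max_ents entries; the core
	 * chains the chunks together and runs sg_init_table() on each,
	 * so the callback only has to produce raw memory. */
	static struct scatterlist *demo_sg_alloc(unsigned int nents, gfp_t gfp_mask)
	{
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
	}

	/* Called once per chunk on teardown, with the chunk size it was
	 * allocated with, which is how scsi_sg_free() above can look up
	 * the matching mempool. */
	static void demo_sg_free(struct scatterlist *sgl, unsigned int nents)
	{
		kfree(sgl);
	}

	static int demo_build_table(struct sg_table *table, unsigned int nents)
	{
		int ret;

		ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
				       GFP_KERNEL, demo_sg_alloc);
		if (ret)	/* a partial allocation is unwound by the caller */
			__sg_free_table(table, SG_MAX_SINGLE_ALLOC, demo_sg_free);
		return ret;
	}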
@@ -987,7 +883,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	 * are leftovers and there is some kind of error
 	 * (result != 0), retry the rest.
 	 */
-	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
+	if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
 		return;
 
 	/* good_bytes = 0, or (inclusive) there were leftovers and
@@ -1001,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				 * and quietly refuse further access.
 				 */
 				cmd->device->changed = 1;
-				scsi_end_request(cmd, 0, this_count, 1);
+				scsi_end_request(cmd, -EIO, this_count, 1);
 				return;
 			} else {
 				/* Must have been a power glitch, or a
@@ -1033,7 +929,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_requeue_command(q, cmd);
 				return;
 			} else {
-				scsi_end_request(cmd, 0, this_count, 1);
+				scsi_end_request(cmd, -EIO, this_count, 1);
 				return;
 			}
 			break;
@@ -1061,7 +957,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 						"Device not ready",
 						&sshdr);
 
-			scsi_end_request(cmd, 0, this_count, 1);
+			scsi_end_request(cmd, -EIO, this_count, 1);
 			return;
 		case VOLUME_OVERFLOW:
 			if (!(req->cmd_flags & REQ_QUIET)) {
@@ -1071,7 +967,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_print_sense("", cmd);
 			}
 			/* See SSC3rXX or current. */
-			scsi_end_request(cmd, 0, this_count, 1);
+			scsi_end_request(cmd, -EIO, this_count, 1);
 			return;
 		default:
 			break;
@@ -1092,7 +988,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			scsi_print_sense("", cmd);
 		}
 	}
-	scsi_end_request(cmd, 0, this_count, !result);
+	scsi_end_request(cmd, -EIO, this_count, !result);
 }
 
 /*
@@ -1120,8 +1016,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!cmd->request_buffer)) {
+	if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
 		scsi_unprep_request(req);
 		return BLKPREP_DEFER;
 	}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 93ece8f4e5d..01e03f3f6ff 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -359,8 +359,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 	int count;
 
 	cmd->use_sg = rq->nr_phys_segments;
-	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
-	if (!cmd->request_buffer)
+	if (scsi_alloc_sgtable(cmd, gfp_mask))
 		return -ENOMEM;
 
 	cmd->request_bufflen = rq->data_len;
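
Note: the calling convention of scsi_alloc_sgtable() changes with this patch. It used to return the scatterlist itself (NULL on failure); it now returns 0 or an error from __sg_alloc_table(), publishing the list through cmd->sg_table.sgl (still mirrored into cmd->request_buffer). Both converted call sites above reduce to the same pattern:

	/* old convention: pointer result, NULL on allocation failure */
	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
	if (!cmd->request_buffer)
		return -ENOMEM;

	/* new convention: 0 on success, non-zero on failure */
	if (scsi_alloc_sgtable(cmd, gfp_mask))
		return -ENOMEM;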