Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--   drivers/scsi/scsi_lib.c   140
1 file changed, 22 insertions(+), 118 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f44ab801119b..7c4c889c5221 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -730,138 +730,43 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
         return index;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 {
         struct scsi_host_sg_pool *sgp;
-        struct scatterlist *sgl, *prev, *ret;
-        unsigned int index;
-        int this, left;
-
-        BUG_ON(!cmd->use_sg);
-
-        left = cmd->use_sg;
-        ret = prev = NULL;
-        do {
-                this = left;
-                if (this > SCSI_MAX_SG_SEGMENTS) {
-                        this = SCSI_MAX_SG_SEGMENTS - 1;
-                        index = SG_MEMPOOL_NR - 1;
-                } else
-                        index = scsi_sgtable_index(this);
 
-                left -= this;
-
-                sgp = scsi_sg_pools + index;
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        mempool_free(sgl, sgp->pool);
+}
 
-                sgl = mempool_alloc(sgp->pool, gfp_mask);
-                if (unlikely(!sgl))
-                        goto enomem;
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+        struct scsi_host_sg_pool *sgp;
 
-                sg_init_table(sgl, sgp->size);
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        return mempool_alloc(sgp->pool, gfp_mask);
+}
 
-                /*
-                 * first loop through, set initial index and return value
-                 */
-                if (!ret)
-                        ret = sgl;
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+        int ret;
 
-                /*
-                 * chain previous sglist, if any. we know the previous
-                 * sglist must be the biggest one, or we would not have
-                 * ended up doing another loop.
-                 */
-                if (prev)
-                        sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+        BUG_ON(!cmd->use_sg);
 
-                /*
-                 * if we have nothing left, mark the last segment as
-                 * end-of-list
-                 */
-                if (!left)
-                        sg_mark_end(&sgl[this - 1]);
+        ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
+                               SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
+        if (unlikely(ret))
+                __sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
+                                scsi_sg_free);
 
-                /*
-                 * don't allow subsequent mempool allocs to sleep, it would
-                 * violate the mempool principle.
-                 */
-                gfp_mask &= ~__GFP_WAIT;
-                gfp_mask |= __GFP_HIGH;
-                prev = sgl;
-        } while (left);
-
-        /*
-         * ->use_sg may get modified after dma mapping has potentially
-         * shrunk the number of segments, so keep a copy of it for free.
-         */
-        cmd->__use_sg = cmd->use_sg;
+        cmd->request_buffer = cmd->sg_table.sgl;
         return ret;
-enomem:
-        if (ret) {
-                /*
-                 * Free entries chained off ret. Since we were trying to
-                 * allocate another sglist, we know that all entries are of
-                 * the max size.
-                 */
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-                prev = ret;
-                ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-                while ((sgl = sg_chain_ptr(ret)) != NULL) {
-                        ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                mempool_free(prev, sgp->pool);
-        }
-        return NULL;
 }
 
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-        struct scatterlist *sgl = cmd->request_buffer;
-        struct scsi_host_sg_pool *sgp;
-
-        /*
-         * if this is the biggest size sglist, check if we have
-         * chained parts we need to free
-         */
-        if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-                unsigned short this, left;
-                struct scatterlist *next;
-                unsigned int index;
-
-                left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-                next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-                while (left && next) {
-                        sgl = next;
-                        this = left;
-                        if (this > SCSI_MAX_SG_SEGMENTS) {
-                                this = SCSI_MAX_SG_SEGMENTS - 1;
-                                index = SG_MEMPOOL_NR - 1;
-                        } else
-                                index = scsi_sgtable_index(this);
-
-                        left -= this;
-
-                        sgp = scsi_sg_pools + index;
-
-                        if (left)
-                                next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                /*
-                 * Restore original, will be freed below
-                 */
-                sgl = cmd->request_buffer;
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-        } else
-                sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-        mempool_free(sgl, sgp->pool);
+        __sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
 EXPORT_SYMBOL(scsi_free_sgtable);
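
Note: the hunk above replaces the open-coded, chained sg-list allocator in scsi_alloc_sgtable()/scsi_free_sgtable() with the generic __sg_alloc_table()/__sg_free_table() helpers, passing scsi_sg_alloc() and scsi_sg_free() as per-chunk callbacks backed by the existing scsi_sg_pools mempools. As a minimal sketch of that callback pattern outside SCSI, assuming the helper prototypes used in this tree (they have grown additional parameters in later kernels) and with the mydrv_* identifiers and MYDRV_MAX_SEGMENTS invented purely for illustration:

#include <linux/scatterlist.h>
#include <linux/slab.h>

#define MYDRV_MAX_SEGMENTS	128	/* entries per chunk (assumption) */

/* assumed to be created elsewhere with object size
 * MYDRV_MAX_SEGMENTS * sizeof(struct scatterlist) */
static struct kmem_cache *mydrv_sg_cache;

static struct scatterlist *mydrv_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        /* hand one fixed-size chunk back to the core chaining code */
        return kmem_cache_alloc(mydrv_sg_cache, gfp_mask);
}

static void mydrv_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        kmem_cache_free(mydrv_sg_cache, sgl);
}

static int mydrv_build_sgtable(struct sg_table *table, unsigned int nents,
                               gfp_t gfp_mask)
{
        int ret;

        /* the core helper allocates and chains as many chunks as needed */
        ret = __sg_alloc_table(table, nents, MYDRV_MAX_SEGMENTS, gfp_mask,
                               mydrv_sg_alloc);
        if (ret)
                /* tear down any chunks that were already allocated */
                __sg_free_table(table, MYDRV_MAX_SEGMENTS, mydrv_sg_free);

        return ret;
}

The max_ents argument tells the core code how many entries fit in one chunk; the same bound must be passed back to __sg_free_table() so it can walk and release the chain, which is exactly how the SCSI code uses SCSI_MAX_SG_SEGMENTS above.
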
@@ -1111,8 +1016,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
         /*
          * If sg table allocation fails, requeue request later.
          */
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-        if (unlikely(!cmd->request_buffer)) {
+        if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
                 scsi_unprep_request(req);
                 return BLKPREP_DEFER;
         }
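
The second hunk adjusts the caller in scsi_init_io(): scsi_alloc_sgtable() now returns 0 on success (non-zero on allocation failure) and publishes the table through cmd->request_buffer itself, so the caller only tests the return value. A hypothetical caller following the new convention, paired with the matching scsi_free_sgtable() on its error path, might look like this sketch (mydrv_prep_cmd() and mydrv_map_data() are invented; the return codes mirror what scsi_init_io() uses in the hunk above):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

/* placeholder for a driver-specific DMA-mapping step (invented) */
static int mydrv_map_data(struct scsi_cmnd *cmd)
{
        return 0;	/* pretend mapping always succeeds */
}

static int mydrv_prep_cmd(struct scsi_cmnd *cmd)
{
        /* a non-zero return now means the sg table could not be allocated */
        if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC)))
                return BLKPREP_DEFER;		/* requeue and retry later */

        if (mydrv_map_data(cmd)) {
                scsi_free_sgtable(cmd);		/* release all chained chunks */
                return BLKPREP_KILL;
        }

        return BLKPREP_OK;	/* sg list is already at cmd->request_buffer */
}
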