author     Jens Axboe <jens.axboe@oracle.com>    2007-11-15 03:13:11 -0500
committer  Jens Axboe <jens.axboe@oracle.com>    2008-01-28 04:05:27 -0500
commit     5ed7959ede0936c55e50421a53f153b17080e876
tree       52fae9cd4959b7855e37c5dc4d0ce7eb22656cdb /drivers/scsi
parent     0db9299f48ebd4a860d6ad4e1d36ac50671d48e7
SG: Convert SCSI to use scatterlist helpers for sg chaining
Also change scsi_alloc_sgtable() to return 0 on success and non-zero on
failure, since the table now maps to the command passed in.
->request_buffer is no longer needed; once drivers are adapted to use
scsi_sglist(), it can be killed.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
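
As background, not part of the patch itself: with the table attached to the command, a low-level driver is expected to walk the scatterlist through the accessors rather than dereference ->request_buffer. A minimal sketch of that driver-side pattern, assuming the scsi_sglist()/scsi_sg_count()/scsi_for_each_sg() helpers of this kernel series; my_count_bytes() is a made-up name used only for illustration:

/*
 * Sketch only, not part of this patch: read the command's scatterlist
 * through the accessors instead of ->request_buffer.
 * scsi_for_each_sg() follows chained tables transparently.
 * my_count_bytes() is an invented example, not a kernel function.
 */
#include <scsi/scsi_cmnd.h>

static unsigned int my_count_bytes(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        unsigned int total = 0;
        int i;

        scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
                total += sg->length;

        return total;
}

Until a driver is converted, both paths see the same list: the patch keeps cmd->request_buffer pointing at cmd->sg_table.sgl, which is also what scsi_sglist() hands back.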
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/scsi_lib.c      | 138
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c  |   3
2 files changed, 21 insertions, 120 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4cf902efbdb..3b5121c4c08 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -739,138 +739,41 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
         return index;
 }

-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 {
         struct scsi_host_sg_pool *sgp;
-        struct scatterlist *sgl, *prev, *ret;
-        unsigned int index;
-        int this, left;
-
-        BUG_ON(!cmd->use_sg);
-
-        left = cmd->use_sg;
-        ret = prev = NULL;
-        do {
-                this = left;
-                if (this > SCSI_MAX_SG_SEGMENTS) {
-                        this = SCSI_MAX_SG_SEGMENTS - 1;
-                        index = SG_MEMPOOL_NR - 1;
-                } else
-                        index = scsi_sgtable_index(this);

-                left -= this;
-
-                sgp = scsi_sg_pools + index;
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        mempool_free(sgl, sgp->pool);
+}

-                sgl = mempool_alloc(sgp->pool, gfp_mask);
-                if (unlikely(!sgl))
-                        goto enomem;
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+        struct scsi_host_sg_pool *sgp;

-                sg_init_table(sgl, sgp->size);
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        return mempool_alloc(sgp->pool, gfp_mask);
+}

-                /*
-                 * first loop through, set initial index and return value
-                 */
-                if (!ret)
-                        ret = sgl;
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+        int ret;

-                /*
-                 * chain previous sglist, if any. we know the previous
-                 * sglist must be the biggest one, or we would not have
-                 * ended up doing another loop.
-                 */
-                if (prev)
-                        sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+        BUG_ON(!cmd->use_sg);

-                /*
-                 * if we have nothing left, mark the last segment as
-                 * end-of-list
-                 */
-                if (!left)
-                        sg_mark_end(&sgl[this - 1]);
+        ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg, gfp_mask, scsi_sg_alloc);
+        if (unlikely(ret))
+                __sg_free_table(&cmd->sg_table, scsi_sg_free);

-                /*
-                 * don't allow subsequent mempool allocs to sleep, it would
-                 * violate the mempool principle.
-                 */
-                gfp_mask &= ~__GFP_WAIT;
-                gfp_mask |= __GFP_HIGH;
-                prev = sgl;
-        } while (left);
-
-        /*
-         * ->use_sg may get modified after dma mapping has potentially
-         * shrunk the number of segments, so keep a copy of it for free.
-         */
-        cmd->__use_sg = cmd->use_sg;
+        cmd->request_buffer = cmd->sg_table.sgl;
         return ret;
-enomem:
-        if (ret) {
-                /*
-                 * Free entries chained off ret. Since we were trying to
-                 * allocate another sglist, we know that all entries are of
-                 * the max size.
-                 */
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-                prev = ret;
-                ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-                while ((sgl = sg_chain_ptr(ret)) != NULL) {
-                        ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                mempool_free(prev, sgp->pool);
-        }
-        return NULL;
 }

 EXPORT_SYMBOL(scsi_alloc_sgtable);

 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-        struct scatterlist *sgl = cmd->request_buffer;
-        struct scsi_host_sg_pool *sgp;
-
-        /*
-         * if this is the biggest size sglist, check if we have
-         * chained parts we need to free
-         */
-        if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-                unsigned short this, left;
-                struct scatterlist *next;
-                unsigned int index;
-
-                left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-                next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-                while (left && next) {
-                        sgl = next;
-                        this = left;
-                        if (this > SCSI_MAX_SG_SEGMENTS) {
-                                this = SCSI_MAX_SG_SEGMENTS - 1;
-                                index = SG_MEMPOOL_NR - 1;
-                        } else
-                                index = scsi_sgtable_index(this);
-
-                        left -= this;
-
-                        sgp = scsi_sg_pools + index;
-
-                        if (left)
-                                next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                /*
-                 * Restore original, will be freed below
-                 */
-                sgl = cmd->request_buffer;
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-        } else
-                sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-        mempool_free(sgl, sgp->pool);
+        __sg_free_table(&cmd->sg_table, scsi_sg_free);
 }

 EXPORT_SYMBOL(scsi_free_sgtable);
@@ -1120,8 +1023,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
         /*
          * If sg table allocation fails, requeue request later.
          */
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-        if (unlikely(!cmd->request_buffer)) {
+        if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
                 scsi_unprep_request(req);
                 return BLKPREP_DEFER;
         }
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 93ece8f4e5d..01e03f3f6ff 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -359,8 +359,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
         int count;

         cmd->use_sg = rq->nr_phys_segments;
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
-        if (!cmd->request_buffer)
+        if (scsi_alloc_sgtable(cmd, gfp_mask))
                 return -ENOMEM;

         cmd->request_bufflen = rq->data_len;
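
A note on the interface this conversion leans on: __sg_alloc_table() and __sg_free_table() take the caller's allocation and release callbacks (scsi_sg_alloc() and scsi_sg_free() above), so the generic scatterlist code owns the splitting and chaining of large tables while SCSI keeps its size-bucketed mempools. The sketch below is plain userspace C with invented names, meant only to illustrate that callback-driven allocation pattern, not the kernel implementation:

/*
 * Standalone illustration of callback-parameterised table allocation:
 * the helper owns the "split into blocks of at most 'max' entries"
 * policy; the caller only supplies how one block is obtained and freed.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void *(*blk_alloc_fn)(unsigned int nents);
typedef void (*blk_free_fn)(void *blk, unsigned int nents);

/* caller-supplied hooks, analogous in spirit to scsi_sg_alloc()/scsi_sg_free() */
static void *demo_alloc(unsigned int nents)
{
        printf("alloc block of %u entries\n", nents);
        return calloc(nents, sizeof(long));
}

static void demo_free(void *blk, unsigned int nents)
{
        printf("free block of %u entries\n", nents);
        free(blk);
}

/* generic helper: split 'nents' into blocks, unwinding via the free hook on failure */
static int alloc_table(unsigned int nents, unsigned int max,
                       blk_alloc_fn alloc, blk_free_fn release,
                       void **blocks, unsigned int *sizes, unsigned int *nblocks)
{
        unsigned int left = nents, n = 0;

        while (left) {
                unsigned int chunk = left > max ? max : left;

                blocks[n] = alloc(chunk);
                if (!blocks[n]) {
                        while (n--)
                                release(blocks[n], sizes[n]);
                        return -1;
                }
                sizes[n++] = chunk;
                left -= chunk;
        }
        *nblocks = n;
        return 0;
}

int main(void)
{
        void *blocks[8];
        unsigned int sizes[8], nblocks, i;

        if (alloc_table(300, 128, demo_alloc, demo_free, blocks, sizes, &nblocks) == 0)
                for (i = 0; i < nblocks; i++)
                        demo_free(blocks[i], sizes[i]);
        return 0;
}

The split buys the same thing as in the patch: the policy for breaking a large request into blocks lives in one place, and each user only provides how a single block is obtained and returned to its pool.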