author    Linus Torvalds <torvalds@linux-foundation.org>    2019-07-11 18:17:41 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-07-11 18:17:41 -0400
commit    1f7563f743d7081710a9d186a8b203997d09f383 (patch)
tree      55091227fb177f25c45f33dfb5f0b2a5e22ccfa7
parent    ba6d10ab8014ac10d25ca513352b6665e73b5785 (diff)
parent    3e99b3b13a1fc8f7354edaee4c04f73a07faba69 (diff)
Merge tag 'scsi-sg' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI scatter-gather list updates from James Bottomley:
 "This topic branch covers a fundamental change in how our sg lists are
  allocated to make mq more efficient by reducing the size of the
  preallocated sg list. This necessitates a large number of driver
  changes because the previous guarantee that if a driver specified
  SG_ALL as the size of its scatter list, it would get a non-chained
  list and didn't need to bother with scatterlist iterators is now
  broken and every driver *must* use scatterlist iterators.

  This was broken out as a separate topic because we need to convert
  all the drivers before pulling the trigger and unconverted drivers
  kept being found, necessitating a rebase"

* tag 'scsi-sg' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (21 commits)
  scsi: core: don't preallocate small SGL in case of NO_SG_CHAIN
  scsi: lib/sg_pool.c: clear 'first_chunk' in case of no preallocation
  scsi: core: avoid preallocating big SGL for data
  scsi: core: avoid preallocating big SGL for protection information
  scsi: lib/sg_pool.c: improve APIs for allocating sg pool
  scsi: esp: use sg helper to iterate over scatterlist
  scsi: NCR5380: use sg helper to iterate over scatterlist
  scsi: wd33c93: use sg helper to iterate over scatterlist
  scsi: ppa: use sg helper to iterate over scatterlist
  scsi: pcmcia: nsp_cs: use sg helper to iterate over scatterlist
  scsi: imm: use sg helper to iterate over scatterlist
  scsi: aha152x: use sg helper to iterate over scatterlist
  scsi: s390: zfcp_fc: use sg helper to iterate over scatterlist
  scsi: staging: unisys: visorhba: use sg helper to iterate over scatterlist
  scsi: usb: image: microtek: use sg helper to iterate over scatterlist
  scsi: pmcraid: use sg helper to iterate over scatterlist
  scsi: ipr: use sg helper to iterate over scatterlist
  scsi: mvumi: use sg helper to iterate over scatterlist
  scsi: lpfc: use sg helper to iterate over scatterlist
  scsi: advansys: use sg helper to iterate over scatterlist
  ...
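The conversion the shortlog above refers to follows one pattern throughout the series: stop walking the scatterlist with pointer/index arithmetic (which only works on a flat, non-chained table) and use the sg iterators instead. Below is a minimal sketch, not taken from any driver in this tree; setup_segment() is a hypothetical stand-in for whatever per-segment work a real driver does, while scsi_for_each_sg(), sg_next() and scsi_sglist()/scsi_sg_count() are the real helpers the patches switch to.

    /* Hypothetical example of the before/after iteration pattern. */
    static void map_segments(struct scsi_cmnd *cmd)
    {
    	struct scatterlist *s;
    	int i;

    	/*
    	 * Old style, broken once the table may be chained:
    	 *
    	 *	struct scatterlist *sg = scsi_sglist(cmd);
    	 *	for (i = 0; i < scsi_sg_count(cmd); i++)
    	 *		setup_segment(&sg[i]);
    	 *
    	 * New style -- the helper advances with sg_next() and so
    	 * follows chain entries transparently:
    	 */
    	scsi_for_each_sg(cmd, s, scsi_sg_count(cmd), i)
    		setup_segment(s);
    }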
 -rw-r--r--  drivers/nvme/host/fc.c                            7
 -rw-r--r--  drivers/nvme/host/rdma.c                          7
 -rw-r--r--  drivers/nvme/target/loop.c                        4
 -rw-r--r--  drivers/s390/scsi/zfcp_fc.c                       4
 -rw-r--r--  drivers/scsi/NCR5380.c                           41
 -rw-r--r--  drivers/scsi/advansys.c                           2
 -rw-r--r--  drivers/scsi/aha152x.c                           46
 -rw-r--r--  drivers/scsi/esp_scsi.c                          20
 -rw-r--r--  drivers/scsi/esp_scsi.h                           2
 -rw-r--r--  drivers/scsi/imm.c                                2
 -rw-r--r--  drivers/scsi/ipr.c                               29
 -rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c                    3
 -rw-r--r--  drivers/scsi/mvumi.c                             11
 -rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c                      4
 -rw-r--r--  drivers/scsi/pmcraid.c                           14
 -rw-r--r--  drivers/scsi/ppa.c                                2
 -rw-r--r--  drivers/scsi/scsi_lib.c                          35
 -rw-r--r--  drivers/scsi/vmw_pvscsi.c                         2
 -rw-r--r--  drivers/scsi/wd33c93.c                            2
 -rw-r--r--  drivers/staging/unisys/visorhba/visorhba_main.c   9
 -rw-r--r--  drivers/usb/image/microtek.c                     20
 -rw-r--r--  drivers/usb/image/microtek.h                      2
 -rw-r--r--  include/linux/scatterlist.h                      11
 -rw-r--r--  lib/scatterlist.c                                36
 -rw-r--r--  lib/sg_pool.c                                    39
 -rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c                 5
 26 files changed, 205 insertions, 154 deletions
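For orientation before the per-driver hunks, a hedged sketch of the reworked chained-SGL allocation API that lib/sg_pool.c and the callers below move to: sg_alloc_table_chained() and sg_free_table_chained() now take the number of entries in the caller's preallocated first chunk instead of a bool, so the inline SGL embedded in the request PDU can be smaller than SG_CHUNK_SIZE. struct example_req and NR_INLINE_SG are hypothetical placeholders; the calls themselves mirror the nvme and SCSI hunks in this diff.

    /* NR_INLINE_SG: whatever the caller preallocates inline
     * (SG_CHUNK_SIZE before this series, e.g. 2 entries afterwards).
     */
    static blk_status_t example_map_data(struct example_req *req, struct request *rq)
    {
    	req->sg_table.sgl = req->first_sgl;	/* inline chunk in the PDU */
    	if (sg_alloc_table_chained(&req->sg_table,
    				   blk_rq_nr_phys_segments(rq),
    				   req->sg_table.sgl, NR_INLINE_SG))
    		return BLK_STS_RESOURCE;

    	/* ... dma-map req->sg_table.sgl and build the command ... */
    	return BLK_STS_OK;
    }

    static void example_unmap_data(struct example_req *req)
    {
    	/* the same first-chunk size must be passed back when freeing */
    	sg_free_table_chained(&req->sg_table, NR_INLINE_SG);
    }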
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9b497d785ed7..dcb2b799966f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2112,7 +2112,8 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 
 	freq->sg_table.sgl = freq->first_sgl;
 	ret = sg_alloc_table_chained(&freq->sg_table,
-			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+			SG_CHUNK_SIZE);
 	if (ret)
 		return -ENOMEM;
 
@@ -2122,7 +2123,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
 	if (unlikely(freq->sg_cnt <= 0)) {
-		sg_free_table_chained(&freq->sg_table, true);
+		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 		freq->sg_cnt = 0;
 		return -EFAULT;
 	}
@@ -2148,7 +2149,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 
 	nvme_cleanup_cmd(rq);
 
-	sg_free_table_chained(&freq->sg_table, true);
+	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 
 	freq->sg_cnt = 0;
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 97f668a39ae1..676619c1454a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1144,7 +1144,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	nvme_cleanup_cmd(rq);
-	sg_free_table_chained(&req->sg_table, true);
+	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1259,7 +1259,8 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->sg_table.sgl = req->first_sgl;
 	ret = sg_alloc_table_chained(&req->sg_table,
-			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
+			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+			SG_CHUNK_SIZE);
 	if (ret)
 		return -ENOMEM;
 
@@ -1299,7 +1300,7 @@ out_unmap_sg:
 			req->nents, rq_data_dir(rq) ==
 			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 out_free_table:
-	sg_free_table_chained(&req->sg_table, true);
+	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 	return ret;
 }
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9e211ad6bdd3..b16dc3981c69 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -77,7 +77,7 @@ static void nvme_loop_complete_rq(struct request *req)
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	nvme_cleanup_cmd(req);
-	sg_free_table_chained(&iod->sg_table, true);
+	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
 	nvme_complete_rq(req);
 }
 
@@ -157,7 +157,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->sg_table.sgl = iod->first_sgl;
 		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl))
+				iod->sg_table.sgl, SG_CHUNK_SIZE))
 			return BLK_STS_RESOURCE;
 
 		iod->req.sg = iod->sg_table.sgl;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 33eddb02ee30..b018b61bd168 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -620,7 +620,7 @@ static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
 {
 	int i;
 
-	for (i = 0; i < count; i++, sg++)
+	for (i = 0; i < count; i++, sg = sg_next(sg))
 		if (sg)
 			free_page((unsigned long) sg_virt(sg));
 		else
@@ -641,7 +641,7 @@ static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
 	int i;
 
 	sg_init_table(sg, count);
-	for (i = 0; i < count; i++, sg++) {
+	for (i = 0; i < count; i++, sg = sg_next(sg)) {
 		addr = (void *) get_zeroed_page(GFP_KERNEL);
 		if (!addr) {
 			zfcp_fc_sg_free_table(sg, i);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d9fa9cf2fd8b..536426f25e86 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -149,12 +149,10 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
 
 	if (scsi_bufflen(cmd)) {
 		cmd->SCp.buffer = scsi_sglist(cmd);
-		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
 		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
-		cmd->SCp.buffers_residual = 0;
 		cmd->SCp.ptr = NULL;
 		cmd->SCp.this_residual = 0;
 	}
@@ -163,6 +161,17 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
 	cmd->SCp.Message = 0;
 }
 
+static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+{
+	struct scatterlist *s = cmd->SCp.buffer;
+
+	if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
+		cmd->SCp.buffer = sg_next(s);
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+		cmd->SCp.this_residual = cmd->SCp.buffer->length;
+	}
+}
+
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
  * @hostdata: host private data
@@ -1670,12 +1679,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				    sun3_dma_setup_done != cmd) {
 					int count;
 
-					if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-						++cmd->SCp.buffer;
-						--cmd->SCp.buffers_residual;
-						cmd->SCp.this_residual = cmd->SCp.buffer->length;
-						cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-					}
+					advance_sg_buffer(cmd);
 
 					count = sun3scsi_dma_xfer_len(hostdata, cmd);
 
@@ -1725,15 +1729,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				 * scatter-gather list, move onto the next one.
 				 */
 
-				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					++cmd->SCp.buffer;
-					--cmd->SCp.buffers_residual;
-					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-					dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
-						 cmd->SCp.this_residual,
-						 cmd->SCp.buffers_residual);
-				}
+				advance_sg_buffer(cmd);
+				dsprintk(NDEBUG_INFORMATION, instance,
+					 "this residual %d, sg ents %d\n",
+					 cmd->SCp.this_residual,
+					 sg_nents(cmd->SCp.buffer));
 
 				/*
 				 * The preferred transfer method is going to be
@@ -2126,12 +2126,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 		if (sun3_dma_setup_done != tmp) {
 			int count;
 
-			if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-				++tmp->SCp.buffer;
-				--tmp->SCp.buffers_residual;
-				tmp->SCp.this_residual = tmp->SCp.buffer->length;
-				tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
-			}
+			advance_sg_buffer(tmp);
 
 			count = sun3scsi_dma_xfer_len(hostdata, tmp);
 
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 926311c792d5..a242a62caaa1 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7710,7 +7710,7 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
 			sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
 			return ADV_SUCCESS;
 		}
-		slp++;
+		slp = sg_next(slp);
 	}
 	sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
 	prev_sg_block = sg_block;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 88c649b3ef61..eb466c2e1839 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -937,7 +937,6 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
 	   SCp.ptr : buffer pointer
 	   SCp.this_residual : buffer length
 	   SCp.buffer : next buffer
-	   SCp.buffers_residual : left buffers in list
 	   SCp.phase : current state of the command */
 
 	if ((phase & resetting) || !scsi_sglist(SCpnt)) {
@@ -945,13 +944,11 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
 		SCpnt->SCp.this_residual = 0;
 		scsi_set_resid(SCpnt, 0);
 		SCpnt->SCp.buffer = NULL;
-		SCpnt->SCp.buffers_residual = 0;
 	} else {
 		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
 		SCpnt->SCp.buffer = scsi_sglist(SCpnt);
 		SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
 		SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
-		SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
 	}
 
 	DO_LOCK(flags);
@@ -2019,10 +2016,9 @@ static void datai_run(struct Scsi_Host *shpnt)
 		}
 
 		if (CURRENT_SC->SCp.this_residual == 0 &&
-		    CURRENT_SC->SCp.buffers_residual > 0) {
+		    !sg_is_last(CURRENT_SC->SCp.buffer)) {
 			/* advance to next buffer */
-			CURRENT_SC->SCp.buffers_residual--;
-			CURRENT_SC->SCp.buffer++;
+			CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
 		}
@@ -2125,10 +2121,10 @@ static void datao_run(struct Scsi_Host *shpnt)
 			CMD_INC_RESID(CURRENT_SC, -2 * data_count);
 		}
 
-		if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
+		if (CURRENT_SC->SCp.this_residual == 0 &&
+		    !sg_is_last(CURRENT_SC->SCp.buffer)) {
 			/* advance to next buffer */
-			CURRENT_SC->SCp.buffers_residual--;
-			CURRENT_SC->SCp.buffer++;
+			CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
 		}
@@ -2147,22 +2143,26 @@ static void datao_run(struct Scsi_Host *shpnt)
 static void datao_end(struct Scsi_Host *shpnt)
 {
 	if(TESTLO(DMASTAT, DFIFOEMP)) {
-		int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
-			GETSTCNT();
+		u32 datao_cnt = GETSTCNT();
+		int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
+		int done;
+		struct scatterlist *sg = scsi_sglist(CURRENT_SC);
 
-		CMD_INC_RESID(CURRENT_SC, data_count);
+		CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt);
 
-		data_count -= CURRENT_SC->SCp.ptr -
-			SG_ADDRESS(CURRENT_SC->SCp.buffer);
-		while(data_count>0) {
-			CURRENT_SC->SCp.buffer--;
-			CURRENT_SC->SCp.buffers_residual++;
-			data_count -= CURRENT_SC->SCp.buffer->length;
+		done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC);
+		/* Locate the first SG entry not yet sent */
+		while (done > 0 && !sg_is_last(sg)) {
+			if (done < sg->length)
+				break;
+			done -= sg->length;
+			sg = sg_next(sg);
 		}
-		CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
-			data_count;
-		CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
-			data_count;
+
+		CURRENT_SC->SCp.buffer = sg;
+		CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
+		CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
+			done;
 	}
 
 	SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2490,7 +2490,7 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
 
 	seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
 		   scsi_get_resid(ptr), ptr->SCp.this_residual,
-		   ptr->SCp.buffers_residual);
+		   sg_nents(ptr->SCp.buffer) - 1);
 
 	if (ptr->SCp.phase & not_issued)
 		seq_puts(m, "not issued|");
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 76e7ca864d6a..bb88995a12c7 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -371,6 +371,7 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 	struct scatterlist *sg = scsi_sglist(cmd);
 	int total = 0, i;
+	struct scatterlist *s;
 
 	if (cmd->sc_data_direction == DMA_NONE)
 		return;
@@ -381,16 +382,18 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 		 * a dma address, so perform an identity mapping.
 		 */
 		spriv->num_sg = scsi_sg_count(cmd);
-		for (i = 0; i < spriv->num_sg; i++) {
-			sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
-			total += sg_dma_len(&sg[i]);
+
+		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
+			s->dma_address = (uintptr_t)sg_virt(s);
+			total += sg_dma_len(s);
 		}
 	} else {
 		spriv->num_sg = scsi_dma_map(cmd);
-		for (i = 0; i < spriv->num_sg; i++)
-			total += sg_dma_len(&sg[i]);
+		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
+			total += sg_dma_len(s);
 	}
 	spriv->cur_residue = sg_dma_len(sg);
+	spriv->prv_sg = NULL;
 	spriv->cur_sg = sg;
 	spriv->tot_residue = total;
 }
@@ -444,7 +447,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 		p->tot_residue = 0;
 	}
 	if (!p->cur_residue && p->tot_residue) {
-		p->cur_sg++;
+		p->prv_sg = p->cur_sg;
+		p->cur_sg = sg_next(p->cur_sg);
 		p->cur_residue = sg_dma_len(p->cur_sg);
 	}
 }
@@ -465,6 +469,7 @@ static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 		return;
 	}
 	ent->saved_cur_residue = spriv->cur_residue;
+	ent->saved_prv_sg = spriv->prv_sg;
 	ent->saved_cur_sg = spriv->cur_sg;
 	ent->saved_tot_residue = spriv->tot_residue;
 }
@@ -479,6 +484,7 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 		return;
 	}
 	spriv->cur_residue = ent->saved_cur_residue;
+	spriv->prv_sg = ent->saved_prv_sg;
 	spriv->cur_sg = ent->saved_cur_sg;
 	spriv->tot_residue = ent->saved_tot_residue;
 }
@@ -1647,7 +1653,7 @@ static int esp_msgin_process(struct esp *esp)
 		spriv = ESP_CMD_PRIV(ent->cmd);
 
 		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
-			spriv->cur_sg--;
+			spriv->cur_sg = spriv->prv_sg;
 			spriv->cur_residue = 1;
 		} else
 			spriv->cur_residue++;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index aa87a6b72dcc..91b32f2a1a1b 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -251,6 +251,7 @@
 struct esp_cmd_priv {
 	int num_sg;
 	int cur_residue;
+	struct scatterlist *prv_sg;
 	struct scatterlist *cur_sg;
 	int tot_residue;
 };
@@ -273,6 +274,7 @@ struct esp_cmd_entry {
 	struct scsi_cmnd *cmd;
 
 	unsigned int saved_cur_residue;
+	struct scatterlist *saved_prv_sg;
 	struct scatterlist *saved_cur_sg;
 	unsigned int saved_tot_residue;
 
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 9751309f8b8c..2519fb7aee51 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -687,7 +687,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
 		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
 			/* if scatter/gather, advance to the next segment */
 			if (cmd->SCp.buffers_residual--) {
-				cmd->SCp.buffer++;
+				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 				cmd->SCp.this_residual =
 					cmd->SCp.buffer->length;
 				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d06bc1a817a1..079c04bc448a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3901,22 +3901,23 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 				 u8 *buffer, u32 len)
 {
 	int bsize_elem, i, result = 0;
-	struct scatterlist *scatterlist;
+	struct scatterlist *sg;
 	void *kaddr;
 
 	/* Determine the actual number of bytes per element */
 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
 
-	scatterlist = sglist->scatterlist;
+	sg = sglist->scatterlist;
 
-	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
+			buffer += bsize_elem) {
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		memcpy(kaddr, buffer, bsize_elem);
 		kunmap(page);
 
-		scatterlist[i].length = bsize_elem;
+		sg->length = bsize_elem;
 
 		if (result != 0) {
 			ipr_trace;
@@ -3925,13 +3926,13 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 	}
 
 	if (len % bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		memcpy(kaddr, buffer, len % bsize_elem);
 		kunmap(page);
 
-		scatterlist[i].length = len % bsize_elem;
+		sg->length = len % bsize_elem;
 	}
 
 	sglist->buffer_len = len;
@@ -3952,6 +3953,7 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
 	struct scatterlist *scatterlist = sglist->scatterlist;
+	struct scatterlist *sg;
 	int i;
 
 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3960,10 +3962,10 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
 
 	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
-	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
-		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
-		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
 	}
 
 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
@@ -3983,6 +3985,7 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 	struct scatterlist *scatterlist = sglist->scatterlist;
+	struct scatterlist *sg;
 	int i;
 
 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3992,11 +3995,11 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
 	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 
-	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 		ioadl[i].flags_and_data_len =
-			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
 		ioadl[i].address =
-			cpu_to_be32(sg_dma_address(&scatterlist[i]));
+			cpu_to_be32(sg_dma_address(sg));
 	}
 
 	ioadl[i-1].flags_and_data_len |=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d5812719de2b..faa596f9e861 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2904,8 +2904,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
 	nvmewqe->context1 = ndlp;
 
-	for (i = 0; i < rsp->sg_cnt; i++) {
-		sgel = &rsp->sg[i];
+	for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
 		physaddr = sg_dma_address(sgel);
 		cnt = sg_dma_len(sgel);
 		sgl->addr_hi = putPaddrHigh(physaddr);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 1fb6f6ca627e..8906aceda4c4 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -195,23 +195,22 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 	unsigned int sgnum = scsi_sg_count(scmd);
 	dma_addr_t busaddr;
 
-	sg = scsi_sglist(scmd);
-	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 			       scmd->sc_data_direction);
 	if (*sg_count > mhba->max_sge) {
 		dev_err(&mhba->pdev->dev,
 			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 			*sg_count, mhba->max_sge);
-		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 			     scmd->sc_data_direction);
 		return -1;
 	}
-	for (i = 0; i < *sg_count; i++) {
-		busaddr = sg_dma_address(&sg[i]);
+	scsi_for_each_sg(scmd, sg, *sg_count, i) {
+		busaddr = sg_dma_address(sg);
 		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 		m_sg->flags = 0;
-		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
 		if ((i + 1) == *sg_count)
 			m_sg->flags |= 1U << mhba->eot_flag;
 
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index a81748e6e8fb..97416e1dcc5b 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -789,7 +789,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
 		     SCpnt->SCp.buffers_residual != 0 ) {
 			//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
 			SCpnt->SCp.buffers_residual--;
-			SCpnt->SCp.buffer++;
+			SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
 			SCpnt->SCp.ptr = BUFFER_ADDR;
 			SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
 			time_out = 1000;
@@ -887,7 +887,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
 		     SCpnt->SCp.buffers_residual != 0 ) {
 			//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
 			SCpnt->SCp.buffers_residual--;
-			SCpnt->SCp.buffer++;
+			SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
 			SCpnt->SCp.ptr = BUFFER_ADDR;
 			SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
 			time_out = 1000;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index ca22526aff7f..71ff3936da4f 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3255,7 +3255,7 @@ static int pmcraid_copy_sglist(
 	int direction
 )
 {
-	struct scatterlist *scatterlist;
+	struct scatterlist *sg;
 	void *kaddr;
 	int bsize_elem;
 	int i;
@@ -3264,10 +3264,10 @@ static int pmcraid_copy_sglist(
 	/* Determine the actual number of bytes per element */
 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
 
-	scatterlist = sglist->scatterlist;
+	sg = sglist->scatterlist;
 
-	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		if (direction == DMA_TO_DEVICE)
@@ -3282,11 +3282,11 @@ static int pmcraid_copy_sglist(
 			return -EFAULT;
 		}
 
-		scatterlist[i].length = bsize_elem;
+		sg->length = bsize_elem;
 	}
 
 	if (len % bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 
@@ -3297,7 +3297,7 @@ static int pmcraid_copy_sglist(
 
 		kunmap(page);
 
-		scatterlist[i].length = len % bsize_elem;
+		sg->length = len % bsize_elem;
 	}
 
 	if (rc) {
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 35213082e933..a406cc825426 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -590,7 +590,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
 		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
 			/* if scatter/gather, advance to the next segment */
 			if (cmd->SCp.buffers_residual--) {
-				cmd->SCp.buffer++;
+				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 				cmd->SCp.this_residual =
 					cmd->SCp.buffer->length;
 				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a2fa31417749..e1da8c70a266 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -40,6 +40,18 @@
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+/*
+ * Size of integrity metadata is usually small, 1 inline sg should
+ * cover normal cases.
+ */
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define SCSI_INLINE_PROT_SG_CNT 0
+#define SCSI_INLINE_SG_CNT 0
+#else
+#define SCSI_INLINE_PROT_SG_CNT 1
+#define SCSI_INLINE_SG_CNT 2
+#endif
+
 static struct kmem_cache *scsi_sdb_cache;
 static struct kmem_cache *scsi_sense_cache;
 static struct kmem_cache *scsi_sense_isadma_cache;
@@ -542,9 +554,11 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
 	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, true);
+		sg_free_table_chained(&cmd->sdb.table,
+				SCSI_INLINE_SG_CNT);
 	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, true);
+		sg_free_table_chained(&cmd->prot_sdb->table,
+				SCSI_INLINE_PROT_SG_CNT);
 }
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -977,7 +991,8 @@ static blk_status_t scsi_init_sgtable(struct request *req,
 	 * If sg table allocation fails, requeue request later.
 	 */
 	if (unlikely(sg_alloc_table_chained(&sdb->table,
-			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+			blk_rq_nr_phys_segments(req), sdb->table.sgl,
+			SCSI_INLINE_SG_CNT)))
 		return BLK_STS_RESOURCE;
 
 	/*
@@ -1031,7 +1046,8 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
 		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
-				prot_sdb->table.sgl)) {
+				prot_sdb->table.sgl,
+				SCSI_INLINE_PROT_SG_CNT)) {
 			ret = BLK_STS_RESOURCE;
 			goto out_free_sgtables;
 		}
@@ -1542,9 +1558,9 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 }
 
 /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
-static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
 {
-	return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
 		sizeof(struct scatterlist);
 }
 
@@ -1726,7 +1742,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	if (scsi_host_get_prot(shost)) {
 		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
 			shost->hostt->cmd_size;
-		cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
 	}
 
 	return 0;
@@ -1820,10 +1836,11 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 {
 	unsigned int cmd_size, sgl_size;
 
-	sgl_size = scsi_mq_sgl_size(shost);
+	sgl_size = scsi_mq_inline_sgl_size(shost);
 	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
 	if (scsi_host_get_prot(shost))
-		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+		cmd_size += sizeof(struct scsi_data_buffer) +
+			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
 
 	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
 	shost->tag_set.ops = &scsi_mq_ops;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 377b07b2feeb..70008816c91f 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -335,7 +335,7 @@ static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
 	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
 
 	sge = &ctx->sgl->sge[0];
-	for (i = 0; i < count; i++, sg++) {
+	for (i = 0; i < count; i++, sg = sg_next(sg)) {
 		sge[i].addr = sg_dma_address(sg);
 		sge[i].length = sg_dma_len(sg);
 		sge[i].flags = 0;
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index f965a3ee9ce5..fb7b289fa09f 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -735,7 +735,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
  * source or destination for THIS transfer.
  */
 	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-		++cmd->SCp.buffer;
+		cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 		--cmd->SCp.buffers_residual;
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 2dad36a05518..dd979ee4dcf1 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -871,12 +871,11 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
 			return;
 		}
 
-		sg = scsi_sglist(scsicmd);
-		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
-			this_page_orig = kmap_atomic(sg_page(sg + i));
+		scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
+			this_page_orig = kmap_atomic(sg_page(sg));
 			this_page = (void *)((unsigned long)this_page_orig |
-					     sg[i].offset);
-			memcpy(this_page, buf + bufind, sg[i].length);
+					     sg->offset);
+			memcpy(this_page, buf + bufind, sg->length);
 			kunmap_atomic(this_page_orig);
 		}
 		kfree(buf);
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 607be1f4fe27..0a57c2cc8e5a 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -488,7 +488,6 @@ static void mts_command_done( struct urb *transfer )
 
 static void mts_do_sg (struct urb* transfer)
 {
-	struct scatterlist * sg;
 	int status = transfer->status;
 	MTS_INT_INIT();
 
@@ -500,13 +499,12 @@ static void mts_do_sg (struct urb* transfer)
 		mts_transfer_cleanup(transfer);
 	}
 
-	sg = scsi_sglist(context->srb);
-	context->fragment++;
+	context->curr_sg = sg_next(context->curr_sg);
 	mts_int_submit_urb(transfer,
 			   context->data_pipe,
-			   sg_virt(&sg[context->fragment]),
-			   sg[context->fragment].length,
-			   context->fragment + 1 == scsi_sg_count(context->srb) ?
+			   sg_virt(context->curr_sg),
+			   context->curr_sg->length,
+			   sg_is_last(context->curr_sg) ?
 			   mts_data_done : mts_do_sg);
 }
 
@@ -526,22 +524,20 @@ static void
 mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
 {
 	int pipe;
-	struct scatterlist * sg;
-
+
 	MTS_DEBUG_GOT_HERE();
 
 	desc->context.instance = desc;
 	desc->context.srb = srb;
-	desc->context.fragment = 0;
 
 	if (!scsi_bufflen(srb)) {
 		desc->context.data = NULL;
 		desc->context.data_length = 0;
 		return;
 	} else {
-		sg = scsi_sglist(srb);
-		desc->context.data = sg_virt(&sg[0]);
-		desc->context.data_length = sg[0].length;
+		desc->context.curr_sg = scsi_sglist(srb);
+		desc->context.data = sg_virt(desc->context.curr_sg);
+		desc->context.data_length = desc->context.curr_sg->length;
 	}
 
 
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index 66685e59241a..7bd5f4639c4a 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -21,7 +21,7 @@ struct mts_transfer_context
 	void *data;
 	unsigned data_length;
 	int data_pipe;
-	int fragment;
+	struct scatterlist *curr_sg;
 
 	u8 *scsi_status; /* status returned from ep_response after command completion */
 };
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 30a9a55c28ba..6eec50fb36c8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -266,10 +266,11 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
-void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
+		     sg_free_fn *);
 void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
-		     struct scatterlist *, gfp_t, sg_alloc_fn *);
+		     struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 				unsigned int n_pages, unsigned int offset,
@@ -331,9 +332,11 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 #endif
 
 #ifdef CONFIG_SG_POOL
-void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+void sg_free_table_chained(struct sg_table *table,
+			   unsigned nents_first_chunk);
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-			   struct scatterlist *first_chunk);
+			   struct scatterlist *first_chunk,
+			   unsigned nents_first_chunk);
 #endif
 
 /*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index eacb82468437..c2cf2c311b7d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -179,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  * __sg_free_table - Free a previously mapped sg table
  * @table: The sg table header to use
  * @max_ents: The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries int the (preallocated) first
+ *	scatterlist chunk, 0 means no such preallocated first chunk
  * @free_fn: Free function
  *
  * Description:
@@ -189,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  *
  **/
 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
-		     bool skip_first_chunk, sg_free_fn *free_fn)
+		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
 {
 	struct scatterlist *sgl, *next;
+	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
 
 	if (unlikely(!table->sgl))
 		return;
@@ -207,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 		 * sg_size is then one less than alloc size, since the last
 		 * element is the chain pointer.
 		 */
-		if (alloc_size > max_ents) {
-			next = sg_chain_ptr(&sgl[max_ents - 1]);
-			alloc_size = max_ents;
+		if (alloc_size > curr_max_ents) {
+			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+			alloc_size = curr_max_ents;
 			sg_size = alloc_size - 1;
 		} else {
 			sg_size = alloc_size;
@@ -217,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 		}
 
 		table->orig_nents -= sg_size;
-		if (skip_first_chunk)
-			skip_first_chunk = false;
+		if (nents_first_chunk)
+			nents_first_chunk = 0;
 		else
 			free_fn(sgl, alloc_size);
 		sgl = next;
+		curr_max_ents = max_ents;
 	}
 
 	table->sgl = NULL;
@@ -244,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
+* @nents_first_chunk: Number of entries int the (preallocated) first
+*	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
@@ -260,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
 **/
 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		     unsigned int max_ents, struct scatterlist *first_chunk,
-		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+		     unsigned int nents_first_chunk, gfp_t gfp_mask,
+		     sg_alloc_fn *alloc_fn)
 {
 	struct scatterlist *sg, *prv;
 	unsigned int left;
+	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+	unsigned prv_max_ents;
 
 	memset(table, 0, sizeof(*table));
 
@@ -279,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 	do {
 		unsigned int sg_size, alloc_size = left;
 
-		if (alloc_size > max_ents) {
-			alloc_size = max_ents;
+		if (alloc_size > curr_max_ents) {
+			alloc_size = curr_max_ents;
 			sg_size = alloc_size - 1;
 		} else
 			sg_size = alloc_size;
@@ -314,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		 * If this is not the first mapping, chain previous part.
 		 */
 		if (prv)
-			sg_chain(prv, max_ents, sg);
+			sg_chain(prv, prv_max_ents, sg);
 		else
 			table->sgl = sg;
 
@@ -325,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 			sg_mark_end(&sg[sg_size - 1]);
 
 		prv = sg;
+		prv_max_ents = curr_max_ents;
+		curr_max_ents = max_ents;
 	} while (left);
 
 	return 0;
@@ -347,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	int ret;
 
 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
-			       NULL, gfp_mask, sg_kmalloc);
+			       NULL, 0, gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
-		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
 
 	return ret;
 }
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..db29e5c1f790 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
 /**
  * sg_free_table_chained - Free a previously mapped sg table
  * @table: The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ *	sg_alloc_table_chained
 *
 * Description:
 * Free an sg table previously allocated and setup with
 * sg_alloc_table_chained().
 *
+* @nents_first_chunk has to be same with that same parameter passed
+* to sg_alloc_table_chained().
+*
 **/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+		unsigned nents_first_chunk)
 {
-	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+	if (table->orig_nents <= nents_first_chunk)
 		return;
-	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+	if (nents_first_chunk == 1)
+		nents_first_chunk = 0;
+
+	__sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
 }
 EXPORT_SYMBOL_GPL(sg_free_table_chained);
 
@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @first_chunk: first SGL
+* @nents_first_chunk: number of the SGL of @first_chunk
 *
 * Description:
 * Allocate and chain SGLs in an sg table. If @nents@ is larger than
-* SG_CHUNK_SIZE a chained sg table will be setup.
+* @nents_first_chunk a chained sg table will be setup. @first_chunk is
+* ignored if nents_first_chunk <= 1 because user expects the SGL points
+* non-chain SGL.
 *
 **/
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-		struct scatterlist *first_chunk)
+		struct scatterlist *first_chunk, unsigned nents_first_chunk)
 {
 	int ret;
 
 	BUG_ON(!nents);
 
-	if (first_chunk) {
-		if (nents <= SG_CHUNK_SIZE) {
+	if (first_chunk && nents_first_chunk) {
+		if (nents <= nents_first_chunk) {
 			table->nents = table->orig_nents = nents;
 			sg_init_table(table->sgl, nents);
 			return 0;
 		}
 	}
 
+	/* User supposes that the 1st SGL includes real entry */
+	if (nents_first_chunk <= 1) {
+		first_chunk = NULL;
+		nents_first_chunk = 0;
+	}
+
 	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
-			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
+			       first_chunk, nents_first_chunk,
+			       GFP_ATOMIC, sg_pool_alloc);
 	if (unlikely(ret))
-		sg_free_table_chained(table, (bool)first_chunk);
+		sg_free_table_chained(table, nents_first_chunk);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 2121c9b4d275..48fe3b16b0d9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -73,7 +73,8 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 
 	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
 	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
-				   ctxt->rw_sg_table.sgl)) {
+				   ctxt->rw_sg_table.sgl,
+				   SG_CHUNK_SIZE)) {
 		kfree(ctxt);
 		ctxt = NULL;
 	}
@@ -84,7 +85,7 @@ out:
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
 				 struct svc_rdma_rw_ctxt *ctxt)
 {
-	sg_free_table_chained(&ctxt->rw_sg_table, true);
+	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
 
 	spin_lock(&rdma->sc_rw_ctxt_lock);
 	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);