 drivers/nvme/host/fc.c            |  7 ++++---
 drivers/nvme/host/rdma.c          |  7 ++++---
 drivers/nvme/target/loop.c        |  4 ++--
 drivers/scsi/scsi_lib.c           | 10 ++++++----
 include/linux/scatterlist.h       | 11 +++++++----
 lib/scatterlist.c                 | 36 +++++++++++++++++++++++-------------
 lib/sg_pool.c                     | 37 +++++++++++++++++++++++++++----------
 net/sunrpc/xprtrdma/svc_rdma_rw.c |  5 +++--
 8 files changed, 76 insertions(+), 41 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index dd8169bbf0d2..46811caac9d2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2112,7 +2112,8 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 
 	freq->sg_table.sgl = freq->first_sgl;
 	ret = sg_alloc_table_chained(&freq->sg_table,
-			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+			SG_CHUNK_SIZE);
 	if (ret)
 		return -ENOMEM;
 
@@ -2122,7 +2123,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
 	if (unlikely(freq->sg_cnt <= 0)) {
-		sg_free_table_chained(&freq->sg_table, true);
+		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 		freq->sg_cnt = 0;
 		return -EFAULT;
 	}
@@ -2148,7 +2149,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 
 	nvme_cleanup_cmd(rq);
 
-	sg_free_table_chained(&freq->sg_table, true);
+	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 
 	freq->sg_cnt = 0;
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f383146e7d0f..f7ea19b45798 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1133,7 +1133,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	nvme_cleanup_cmd(rq);
-	sg_free_table_chained(&req->sg_table, true);
+	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1248,7 +1248,8 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->sg_table.sgl = req->first_sgl;
 	ret = sg_alloc_table_chained(&req->sg_table,
-			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
+			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+			SG_CHUNK_SIZE);
 	if (ret)
 		return -ENOMEM;
 
@@ -1288,7 +1289,7 @@ out_unmap_sg:
 			req->nents, rq_data_dir(rq) ==
 			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 out_free_table:
-	sg_free_table_chained(&req->sg_table, true);
+	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 	return ret;
 }
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9e211ad6bdd3..b16dc3981c69 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -77,7 +77,7 @@ static void nvme_loop_complete_rq(struct request *req)
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	nvme_cleanup_cmd(req);
-	sg_free_table_chained(&iod->sg_table, true);
+	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
 	nvme_complete_rq(req);
 }
 
@@ -157,7 +157,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->sg_table.sgl = iod->first_sgl;
 		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl))
+				iod->sg_table.sgl, SG_CHUNK_SIZE))
 			return BLK_STS_RESOURCE;
 
 		iod->req.sg = iod->sg_table.sgl;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0916bd6d22b0..acc0f7080f18 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -541,9 +541,9 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
 	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, true);
+		sg_free_table_chained(&cmd->sdb.table, SG_CHUNK_SIZE);
 	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, true);
+		sg_free_table_chained(&cmd->prot_sdb->table, SG_CHUNK_SIZE);
 }
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -976,7 +976,8 @@ static blk_status_t scsi_init_sgtable(struct request *req,
 	 * If sg table allocation fails, requeue request later.
 	 */
 	if (unlikely(sg_alloc_table_chained(&sdb->table,
-			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+			blk_rq_nr_phys_segments(req), sdb->table.sgl,
+			SG_CHUNK_SIZE)))
 		return BLK_STS_RESOURCE;
 
 	/*
@@ -1030,7 +1031,8 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
 		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
-				prot_sdb->table.sgl)) {
+				prot_sdb->table.sgl,
+				SG_CHUNK_SIZE)) {
 			ret = BLK_STS_RESOURCE;
 			goto out_free_sgtables;
 		}
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 30a9a55c28ba..6eec50fb36c8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -266,10 +266,11 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
-void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
+		     sg_free_fn *);
 void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
-		     struct scatterlist *, gfp_t, sg_alloc_fn *);
+		     struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 				unsigned int n_pages, unsigned int offset,
@@ -331,9 +332,11 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 #endif
 
 #ifdef CONFIG_SG_POOL
-void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+void sg_free_table_chained(struct sg_table *table,
+		unsigned nents_first_chunk);
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-		struct scatterlist *first_chunk);
+		struct scatterlist *first_chunk,
+		unsigned nents_first_chunk);
 #endif
 
 /*
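
Note: a minimal caller-side sketch of the updated interface (hypothetical struct and function names, modeled on the driver conversions above; assumes CONFIG_SG_POOL). The size of the preallocated inline chunk is now passed explicitly, and the same value must be passed back when freeing:

#include <linux/scatterlist.h>

/* Hypothetical per-request context with an inline first chunk. */
struct demo_req {
	struct sg_table sg_table;
	struct scatterlist first_sgl[SG_CHUNK_SIZE];
};

static int demo_map(struct demo_req *req, unsigned int nr_segs)
{
	req->sg_table.sgl = req->first_sgl;
	/* The size of the inline chunk is now passed explicitly. */
	if (sg_alloc_table_chained(&req->sg_table, nr_segs,
				   req->sg_table.sgl, SG_CHUNK_SIZE))
		return -ENOMEM;
	return 0;
}

static void demo_unmap(struct demo_req *req)
{
	/* Must match the nents_first_chunk used at allocation time. */
	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
}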
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 739dc9fe2c55..77ec8eec3fd0 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -181,7 +181,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  * __sg_free_table - Free a previously mapped sg table
  * @table: The sg table header to use
  * @max_ents: The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ *	scatterlist chunk, 0 means no such preallocated first chunk
  * @free_fn: Free function
  *
  * Description:
@@ -191,9 +192,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  *
  **/
 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
-		     bool skip_first_chunk, sg_free_fn *free_fn)
+		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
 {
 	struct scatterlist *sgl, *next;
+	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
 
 	if (unlikely(!table->sgl))
 		return;
@@ -209,9 +211,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 		 * sg_size is then one less than alloc size, since the last
 		 * element is the chain pointer.
 		 */
-		if (alloc_size > max_ents) {
-			next = sg_chain_ptr(&sgl[max_ents - 1]);
-			alloc_size = max_ents;
+		if (alloc_size > curr_max_ents) {
+			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+			alloc_size = curr_max_ents;
 			sg_size = alloc_size - 1;
 		} else {
 			sg_size = alloc_size;
@@ -219,11 +221,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 		}
 
 		table->orig_nents -= sg_size;
-		if (skip_first_chunk)
-			skip_first_chunk = false;
+		if (nents_first_chunk)
+			nents_first_chunk = 0;
 		else
 			free_fn(sgl, alloc_size);
 		sgl = next;
+		curr_max_ents = max_ents;
 	}
 
 	table->sgl = NULL;
@@ -246,6 +249,8 @@ EXPORT_SYMBOL(sg_free_table);
  * @table: The sg table header to use
  * @nents: Number of entries in sg list
  * @max_ents: The maximum number of entries the allocator returns per call
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ *	scatterlist chunk, 0 means no such preallocated chunk provided by user
  * @gfp_mask: GFP allocation mask
  * @alloc_fn: Allocator to use
  *
@@ -262,10 +267,13 @@ EXPORT_SYMBOL(sg_free_table);
  **/
 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		     unsigned int max_ents, struct scatterlist *first_chunk,
-		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+		     unsigned int nents_first_chunk, gfp_t gfp_mask,
+		     sg_alloc_fn *alloc_fn)
 {
 	struct scatterlist *sg, *prv;
 	unsigned int left;
+	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+	unsigned prv_max_ents;
 
 	memset(table, 0, sizeof(*table));
 
@@ -281,8 +289,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 	do {
 		unsigned int sg_size, alloc_size = left;
 
-		if (alloc_size > max_ents) {
-			alloc_size = max_ents;
+		if (alloc_size > curr_max_ents) {
+			alloc_size = curr_max_ents;
 			sg_size = alloc_size - 1;
 		} else
 			sg_size = alloc_size;
@@ -316,7 +324,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		 * If this is not the first mapping, chain previous part.
 		 */
 		if (prv)
-			sg_chain(prv, max_ents, sg);
+			sg_chain(prv, prv_max_ents, sg);
 		else
 			table->sgl = sg;
 
@@ -327,6 +335,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		sg_mark_end(&sg[sg_size - 1]);
 
 		prv = sg;
+		prv_max_ents = curr_max_ents;
+		curr_max_ents = max_ents;
 	} while (left);
 
 	return 0;
@@ -349,9 +359,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	int ret;
 
 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
-			       NULL, gfp_mask, sg_kmalloc);
+			       NULL, 0, gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
-		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
 
 	return ret;
 }
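
Note: a hedged sketch of what the new __sg_alloc_table()/__sg_free_table() parameters allow (illustrative allocator and sizes, not part of the patch). The preallocated first chunk may now be smaller than max_ents, so the free path must be given the same first-chunk size to find the chain pointer at the end of that smaller chunk and to skip freeing it:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical allocator pair matching sg_alloc_fn/sg_free_fn. */
static struct scatterlist *demo_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
}

static void demo_sg_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

/* first4 is a preallocated chunk of 4 entries; later chunks hold up to 8. */
static int demo_alloc(struct sg_table *table, struct scatterlist *first4,
		      unsigned int nents)
{
	int ret;

	ret = __sg_alloc_table(table, nents, 8, first4, 4,
			       GFP_KERNEL, demo_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(table, 8, 4, demo_sg_free);
	return ret;
}

static void demo_free(struct sg_table *table)
{
	/* Pass the same max_ents (8) and first-chunk size (4) as on alloc. */
	__sg_free_table(table, 8, 4, demo_sg_free);
}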
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index d1c1e6388eaa..b3b8cf62ff49 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -69,18 +69,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
 /**
  * sg_free_table_chained - Free a previously mapped sg table
  * @table: The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ *		sg_alloc_table_chained
  *
  * Description:
  *    Free an sg table previously allocated and setup with
  *    sg_alloc_table_chained().
  *
+ *    @nents_first_chunk has to be the same value that was passed to
+ *    sg_alloc_table_chained().
+ *
 **/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+		unsigned nents_first_chunk)
 {
-	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+	if (table->orig_nents <= nents_first_chunk)
 		return;
-	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+	if (nents_first_chunk == 1)
+		nents_first_chunk = 0;
+
+	__sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
 }
 EXPORT_SYMBOL_GPL(sg_free_table_chained);
 
@@ -89,31 +98,39 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
  * @table: The sg table header to use
  * @nents: Number of entries in sg list
  * @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in the @first_chunk SGL
  *
  * Description:
  *    Allocate and chain SGLs in an sg table. If @nents@ is larger than
- *    SG_CHUNK_SIZE a chained sg table will be setup.
+ *    @nents_first_chunk a chained sg table will be set up.
  *
 **/
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-		struct scatterlist *first_chunk)
+		struct scatterlist *first_chunk, unsigned nents_first_chunk)
 {
 	int ret;
 
 	BUG_ON(!nents);
 
-	if (first_chunk) {
-		if (nents <= SG_CHUNK_SIZE) {
+	if (first_chunk && nents_first_chunk) {
+		if (nents <= nents_first_chunk) {
 			table->nents = table->orig_nents = nents;
 			sg_init_table(table->sgl, nents);
 			return 0;
 		}
 	}
 
+	/* The caller assumes the first SGL includes a real entry */
+	if (nents_first_chunk == 1) {
+		first_chunk = NULL;
+		nents_first_chunk = 0;
+	}
+
 	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
-			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
+			       first_chunk, nents_first_chunk,
+			       GFP_ATOMIC, sg_pool_alloc);
 	if (unlikely(ret))
-		sg_free_table_chained(table, (bool)first_chunk);
+		sg_free_table_chained(table, nents_first_chunk);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
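
Note: the nents_first_chunk == 1 special case above exists because the caller expects the first chunk to hold real entries, while a one-entry chunk would only have room for the chain pointer. A hypothetical caller with a single inline entry (illustrative names only, not part of the patch) could then look like this: a one-segment request stays on the inline entry, anything larger is allocated entirely from the sg pools:

/* Hypothetical context embedding a single inline scatterlist entry. */
struct demo_ctxt {
	struct sg_table table;
	struct scatterlist inline_sg[1];
};

static int demo_alloc_sgl(struct demo_ctxt *ctxt, unsigned int nr_segs)
{
	ctxt->table.sgl = ctxt->inline_sg;
	/*
	 * With nents_first_chunk == 1, nr_segs == 1 is served from the
	 * inline entry; anything larger is allocated from the sg pools.
	 */
	if (sg_alloc_table_chained(&ctxt->table, nr_segs,
				   ctxt->inline_sg, 1))
		return -ENOMEM;
	return 0;
}

static void demo_free_sgl(struct demo_ctxt *ctxt)
{
	/* Same nents_first_chunk as at allocation time. */
	sg_free_table_chained(&ctxt->table, 1);
}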
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 2121c9b4d275..48fe3b16b0d9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -73,7 +73,8 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 
 	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
 	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
-				   ctxt->rw_sg_table.sgl)) {
+				   ctxt->rw_sg_table.sgl,
+				   SG_CHUNK_SIZE)) {
 		kfree(ctxt);
 		ctxt = NULL;
 	}
@@ -84,7 +85,7 @@ out:
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
 				 struct svc_rdma_rw_ctxt *ctxt)
 {
-	sg_free_table_chained(&ctxt->rw_sg_table, true);
+	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
 
 	spin_lock(&rdma->sc_rw_ctxt_lock);
 	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);