path: root/lib/scatterlist.c
author		Christoph Hellwig <hch@lst.de>	2014-04-15 08:38:31 -0400
committer	Christoph Hellwig <hch@lst.de>	2014-07-25 17:16:21 -0400
commit		c53c6d6a68b13b1dff2892551b56cfdc07887d9e (patch)
tree		8bd6ddc1ff0f33e357ad6c230f62fd7b1cf1b28a /lib/scatterlist.c
parent		f6d47e74fcb2814225e429c94355ad1c551daffb (diff)
scatterlist: allow chaining to preallocated chunks
Blk-mq drivers usually preallocate their S/G list as part of the request, but if we want to support the very large S/G lists currently supported by the SCSI code, that would tie up a lot of memory in the preallocated request pool. Add support to the scatterlist code so that it can initialize an S/G list that uses a preallocated first chunk and dynamically allocated additional chunks. That way the scsi-mq code can preallocate a first page worth of S/G entries as part of the request and dynamically extend the S/G list when needed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
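To illustrate the new interface, here is a minimal, hypothetical sketch of a caller that embeds the first S/G chunk in its own per-request structure and only allocates further chunks on demand. The foo_* names and the FOO_SG_PREALLOC size are assumptions made up for this illustration and are not part of the patch; the real consumer is the scsi-mq code.

/* Hypothetical example -- foo_* names and sizes are illustrative only. */
#include <linux/scatterlist.h>
#include <linux/slab.h>

#define FOO_SG_PREALLOC	8	/* entries kept inline in the request */

struct foo_request {
	struct sg_table table;
	/* first chunk lives inside the request, no allocation needed */
	struct scatterlist first_chunk[FOO_SG_PREALLOC];
};

/* callbacks used only for chunks beyond the preallocated first one */
static struct scatterlist *foo_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
}

static void foo_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	kfree(sgl);
}

static int foo_request_init_sg(struct foo_request *req, unsigned int nents,
			       gfp_t gfp_mask)
{
	/*
	 * The first FOO_SG_PREALLOC entries come from req->first_chunk;
	 * only larger lists fall back to foo_sg_alloc() for extra chunks.
	 */
	return __sg_alloc_table(&req->table, nents, FOO_SG_PREALLOC,
				req->first_chunk, gfp_mask, foo_sg_alloc);
}

static void foo_request_free_sg(struct foo_request *req)
{
	/* skip_first_chunk == true: never kfree() the embedded chunk */
	__sg_free_table(&req->table, FOO_SG_PREALLOC, true, foo_sg_free);
}

Passing false for skip_first_chunk, as the unchanged sg_free_table() wrapper below does, preserves the old behaviour, so existing callers are unaffected.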
Diffstat (limited to 'lib/scatterlist.c')
-rw-r--r--	lib/scatterlist.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3a8e8e8fb2a5..b4415fceb7e7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  * __sg_free_table - Free a previously mapped sg table
  * @table: The sg table header to use
  * @max_ents: The maximum number of entries per single scatterlist
+ * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
  * @free_fn: Free function
  *
  * Description:
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  *
  **/
 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
-		     sg_free_fn *free_fn)
+		     bool skip_first_chunk, sg_free_fn *free_fn)
 {
 	struct scatterlist *sgl, *next;
 
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 		}
 
 		table->orig_nents -= sg_size;
-		free_fn(sgl, alloc_size);
+		if (!skip_first_chunk) {
+			free_fn(sgl, alloc_size);
+			skip_first_chunk = false;
+		}
 		sgl = next;
 	}
 
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table);
  **/
 void sg_free_table(struct sg_table *table)
 {
-	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
 }
 EXPORT_SYMBOL(sg_free_table);
 
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table);
  *
  **/
 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
-		     unsigned int max_ents, gfp_t gfp_mask,
-		     sg_alloc_fn *alloc_fn)
+		     unsigned int max_ents, struct scatterlist *first_chunk,
+		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
 {
 	struct scatterlist *sg, *prv;
 	unsigned int left;
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 
 		left -= sg_size;
 
-		sg = alloc_fn(alloc_size, gfp_mask);
+		if (first_chunk) {
+			sg = first_chunk;
+			first_chunk = NULL;
+		} else {
+			sg = alloc_fn(alloc_size, gfp_mask);
+		}
 		if (unlikely(!sg)) {
 			/*
 			 * Adjust entry count to reflect that the last
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 	int ret;
 
 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
-			       gfp_mask, sg_kmalloc);
+			       NULL, gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
-		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
 
 	return ret;
 }