author		Ming Lin <ming.l@ssi.samsung.com>	2016-04-04 17:48:11 -0400
committer	Martin K. Petersen <martin.petersen@oracle.com>	2016-04-15 16:53:14 -0400
commit		9b1d6c8950021ab007608d455fc9c398ecd25476 (patch)
tree		6dabbc37da01e225dbf66f1c400b551f1baf10f9
parent		65e8617fba17732b4c68d3369a621725838b6f28 (diff)
lib: scatterlist: move SG pool code from SCSI driver to lib/sg_pool.c
Now it's ready to move the mempool based SG chained allocator code from
the SCSI driver to lib/sg_pool.c, which is compiled only when the Kconfig
symbol CONFIG_SG_POOL is enabled. SCSI selects CONFIG_SG_POOL.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--	drivers/scsi/Kconfig		  1
-rw-r--r--	drivers/scsi/scsi_lib.c		137
-rw-r--r--	include/linux/scatterlist.h	 25
-rw-r--r--	include/scsi/scsi.h		 19
-rw-r--r--	lib/Kconfig			  7
-rw-r--r--	lib/Makefile			  1
-rw-r--r--	lib/sg_pool.c			172
7 files changed, 206 insertions, 156 deletions
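
For context, the exported helpers introduced below replace what drivers/scsi/scsi_lib.c previously did privately. The following consumer sketch is not part of this commit; foo_cmd, foo_map_data and foo_unmap_data are hypothetical names, shown only to illustrate how a subsystem that selects SG_POOL might use the chained-scatterlist API:

/*
 * Hypothetical consumer of the new API (illustration only, not in this
 * patch): keep an inline first chunk of SG_CHUNK_SIZE entries and let
 * lib/sg_pool.c chain further mempool-backed chunks for larger requests.
 */
#include <linux/scatterlist.h>

struct foo_cmd {				/* hypothetical per-command state */
	struct sg_table table;
	struct scatterlist inline_sgl[SG_CHUNK_SIZE];
};

static int foo_map_data(struct foo_cmd *cmd, int nents)
{
	/*
	 * With a non-NULL first_chunk, requests of up to SG_CHUNK_SIZE
	 * entries are served from inline_sgl and never hit the mempools.
	 */
	return sg_alloc_table_chained(&cmd->table, nents, cmd->inline_sgl);
}

static void foo_unmap_data(struct foo_cmd *cmd)
{
	/* first_chunk == true: the first chunk was the inline array. */
	sg_free_table_chained(&cmd->table, true);
}

The inline-first-chunk pattern mirrors how the SCSI midlayer keeps using these helpers after the move.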
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 0950567e6269..98e5d51a3346 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -17,6 +17,7 @@ config SCSI
 	tristate "SCSI device support"
 	depends on BLOCK
 	select SCSI_DMA if HAS_DMA
+	select SG_POOL
 	---help---
 	  If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
 	  any other SCSI device under Linux, say Y and make sure that you know
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8f776f1e95ce..b920c5dabf60 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -14,8 +14,6 @@
 #include <linux/completion.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <linux/mempool.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -40,39 +38,6 @@
 #include "scsi_logging.h"
 
 
-#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
-#define SG_MEMPOOL_SIZE		2
-
-struct sg_pool {
-	size_t		size;
-	char		*name;
-	struct kmem_cache	*slab;
-	mempool_t	*pool;
-};
-
-#define SP(x) { .size = x, "sgpool-" __stringify(x) }
-#if (SG_CHUNK_SIZE < 32)
-#error SG_CHUNK_SIZE is too small (must be 32 or greater)
-#endif
-static struct sg_pool sg_pools[] = {
-	SP(8),
-	SP(16),
-#if (SG_CHUNK_SIZE > 32)
-	SP(32),
-#if (SG_CHUNK_SIZE > 64)
-	SP(64),
-#if (SG_CHUNK_SIZE > 128)
-	SP(128),
-#if (SG_CHUNK_SIZE > 256)
-#error SG_CHUNK_SIZE is too large (256 MAX)
-#endif
-#endif
-#endif
-#endif
-	SP(SG_CHUNK_SIZE)
-};
-#undef SP
-
 struct kmem_cache *scsi_sdb_cache;
 
 /*
@@ -553,65 +518,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 		scsi_run_queue(sdev->request_queue);
 }
 
-static inline unsigned int sg_pool_index(unsigned short nents)
-{
-	unsigned int index;
-
-	BUG_ON(nents > SG_CHUNK_SIZE);
-
-	if (nents <= 8)
-		index = 0;
-	else
-		index = get_count_order(nents) - 3;
-
-	return index;
-}
-
-static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
-{
-	struct sg_pool *sgp;
-
-	sgp = sg_pools + sg_pool_index(nents);
-	mempool_free(sgl, sgp->pool);
-}
-
-static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
-{
-	struct sg_pool *sgp;
-
-	sgp = sg_pools + sg_pool_index(nents);
-	return mempool_alloc(sgp->pool, gfp_mask);
-}
-
-static void sg_free_table_chained(struct sg_table *table, bool first_chunk)
-{
-	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
-		return;
-	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
-}
-
-static int sg_alloc_table_chained(struct sg_table *table, int nents,
-		struct scatterlist *first_chunk)
-{
-	int ret;
-
-	BUG_ON(!nents);
-
-	if (first_chunk) {
-		if (nents <= SG_CHUNK_SIZE) {
-			table->nents = table->orig_nents = nents;
-			sg_init_table(table->sgl, nents);
-			return 0;
-		}
-	}
-
-	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
-			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
-	if (unlikely(ret))
-		sg_free_table_chained(table, (bool)first_chunk);
-	return ret;
-}
-
 static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 {
 	if (cmd->request->cmd_type == REQ_TYPE_FS) {
@@ -2269,8 +2175,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
 
 int __init scsi_init_queue(void)
 {
-	int i;
-
 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
 					   sizeof(struct scsi_data_buffer),
 					   0, 0, NULL);
@@ -2279,53 +2183,12 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct sg_pool *sgp = sg_pools + i;
-		int size = sgp->size * sizeof(struct scatterlist);
-
-		sgp->slab = kmem_cache_create(sgp->name, size, 0,
-				SLAB_HWCACHE_ALIGN, NULL);
-		if (!sgp->slab) {
-			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
-					sgp->name);
-			goto cleanup_sdb;
-		}
-
-		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
-						     sgp->slab);
-		if (!sgp->pool) {
-			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
-					sgp->name);
-			goto cleanup_sdb;
-		}
-	}
-
 	return 0;
-
-cleanup_sdb:
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct sg_pool *sgp = sg_pools + i;
-		if (sgp->pool)
-			mempool_destroy(sgp->pool);
-		if (sgp->slab)
-			kmem_cache_destroy(sgp->slab);
-	}
-	kmem_cache_destroy(scsi_sdb_cache);
-
-	return -ENOMEM;
 }
 
 void scsi_exit_queue(void)
 {
-	int i;
-
 	kmem_cache_destroy(scsi_sdb_cache);
-
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct sg_pool *sgp = sg_pools + i;
-		mempool_destroy(sgp->pool);
-		kmem_cache_destroy(sgp->slab);
-	}
 }
 
 /**
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 556ec1ea2574..cb3c8fe6acd7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 #define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))
 
 /*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation. We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32
+ */
+#define SG_CHUNK_SIZE	128
+
+/*
+ * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
+ * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+ */
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+#define SG_MAX_SEGMENTS	2048
+#else
+#define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
+#endif
+
+#ifdef CONFIG_SG_POOL
+void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+			   struct scatterlist *first_chunk);
+#endif
+
+/*
  * sg page iterator
  *
  * Iterates over sg entries page-by-page. On each successful iteration,
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 74dafa75bae7..8ec7c30e35af 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -18,25 +18,6 @@ enum scsi_timeouts {
 };
 
 /*
- * The maximum number of SG segments that we will put inside a
- * scatterlist (unless chaining is used). Should ideally fit inside a
- * single page, to avoid a higher order allocation. We could define this
- * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
- * minimum value is 32
- */
-#define SG_CHUNK_SIZE	128
-
-/*
- * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS	2048
-#else
-#define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
-#endif
-
-/*
  * DIX-capable adapters effectively support infinite chaining for the
  * protection information scatterlist
  */
diff --git a/lib/Kconfig b/lib/Kconfig
index 3cca1222578e..61d55bd0ed89 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -523,6 +523,13 @@ config SG_SPLIT
 	  a scatterlist. This should be selected by a driver or an API which
 	  whishes to split a scatterlist amongst multiple DMA channels.
 
+config SG_POOL
+	def_bool n
+	help
+	  Provides a helper to allocate chained scatterlists. This should be
+	  selected by a driver or an API which whishes to allocate chained
+	  scatterlist.
+
 #
 # sg chaining option
 #
diff --git a/lib/Makefile b/lib/Makefile
index 7bd6fd436c97..bf01c2673423 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -178,6 +178,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
 obj-$(CONFIG_SG_SPLIT) += sg_split.o
+obj-$(CONFIG_SG_POOL) += sg_pool.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 obj-$(CONFIG_IRQ_POLL) += irq_poll.o
 
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
new file mode 100644
index 000000000000..6dd30615a201
--- /dev/null
+++ b/lib/sg_pool.c
@@ -0,0 +1,172 @@
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+
+#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
+#define SG_MEMPOOL_SIZE		2
+
+struct sg_pool {
+	size_t		size;
+	char		*name;
+	struct kmem_cache	*slab;
+	mempool_t	*pool;
+};
+
+#define SP(x) { .size = x, "sgpool-" __stringify(x) }
+#if (SG_CHUNK_SIZE < 32)
+#error SG_CHUNK_SIZE is too small (must be 32 or greater)
+#endif
+static struct sg_pool sg_pools[] = {
+	SP(8),
+	SP(16),
+#if (SG_CHUNK_SIZE > 32)
+	SP(32),
+#if (SG_CHUNK_SIZE > 64)
+	SP(64),
+#if (SG_CHUNK_SIZE > 128)
+	SP(128),
+#if (SG_CHUNK_SIZE > 256)
+#error SG_CHUNK_SIZE is too large (256 MAX)
+#endif
+#endif
+#endif
+#endif
+	SP(SG_CHUNK_SIZE)
+};
+#undef SP
+
+static inline unsigned int sg_pool_index(unsigned short nents)
+{
+	unsigned int index;
+
+	BUG_ON(nents > SG_CHUNK_SIZE);
+
+	if (nents <= 8)
+		index = 0;
+	else
+		index = get_count_order(nents) - 3;
+
+	return index;
+}
+
+static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
+{
+	struct sg_pool *sgp;
+
+	sgp = sg_pools + sg_pool_index(nents);
+	mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+	struct sg_pool *sgp;
+
+	sgp = sg_pools + sg_pool_index(nents);
+	return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+/**
+ * sg_free_table_chained - Free a previously mapped sg table
+ * @table:	The sg table header to use
+ * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ *
+ *  Description:
+ *    Free an sg table previously allocated and setup with
+ *    sg_alloc_table_chained().
+ *
+ **/
+void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+{
+	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+		return;
+	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+}
+EXPORT_SYMBOL_GPL(sg_free_table_chained);
+
+/**
+ * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
+ * @table:	The sg table header to use
+ * @nents:	Number of entries in sg list
+ * @first_chunk: first SGL
+ *
+ *  Description:
+ *    Allocate and chain SGLs in an sg table. If @nents@ is larger than
+ *    SG_CHUNK_SIZE a chained sg table will be setup.
+ *
+ **/
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+		struct scatterlist *first_chunk)
+{
+	int ret;
+
+	BUG_ON(!nents);
+
+	if (first_chunk) {
+		if (nents <= SG_CHUNK_SIZE) {
+			table->nents = table->orig_nents = nents;
+			sg_init_table(table->sgl, nents);
+			return 0;
+		}
+	}
+
+	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
+			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
+	if (unlikely(ret))
+		sg_free_table_chained(table, (bool)first_chunk);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
+
+static __init int sg_pool_init(void)
+{
+	int i;
+
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		int size = sgp->size * sizeof(struct scatterlist);
+
+		sgp->slab = kmem_cache_create(sgp->name, size, 0,
+				SLAB_HWCACHE_ALIGN, NULL);
+		if (!sgp->slab) {
+			printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
+					sgp->name);
+			goto cleanup_sdb;
+		}
+
+		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+						     sgp->slab);
+		if (!sgp->pool) {
+			printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
+					sgp->name);
+			goto cleanup_sdb;
+		}
+	}
+
+	return 0;
+
+cleanup_sdb:
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		if (sgp->pool)
+			mempool_destroy(sgp->pool);
+		if (sgp->slab)
+			kmem_cache_destroy(sgp->slab);
+	}
+
+	return -ENOMEM;
+}
+
+static __exit void sg_pool_exit(void)
+{
+	int i;
+
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		mempool_destroy(sgp->pool);
+		kmem_cache_destroy(sgp->slab);
+	}
+}
+
+module_init(sg_pool_init);
+module_exit(sg_pool_exit);
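
As a side note on the pool sizing above: sg_pool_index() rounds nents up to the next power of two and maps it onto one of sgpool-8/16/32/64/128 (the last being SG_CHUNK_SIZE entries), each backed by a mempool that guarantees SG_MEMPOOL_SIZE (2) elements. The following standalone userspace sketch is an illustration only, not part of the patch; it re-implements get_count_order() just for the demo:

/*
 * Standalone illustration (userspace, not kernel code) of the pool
 * selection done by sg_pool_index(): round nents up to a power of two
 * and map it onto sgpool-8 .. sgpool-128.
 */
#include <stdio.h>

/* Stand-in for the kernel's get_count_order(): ceil(log2(n)). */
static unsigned int count_order(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static unsigned int pool_index(unsigned int nents)
{
	return nents <= 8 ? 0 : count_order(nents) - 3;
}

int main(void)
{
	static const char * const names[] = {
		"sgpool-8", "sgpool-16", "sgpool-32", "sgpool-64", "sgpool-128",
	};
	unsigned int samples[] = { 4, 8, 9, 33, 100, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nents=%3u -> %s\n", samples[i],
		       names[pool_index(samples[i])]);
	return 0;
}

For example, nents = 9 rounds up to 16 and is served from sgpool-16, while anything above 64 (up to the SG_CHUNK_SIZE cap) comes from sgpool-128.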