Diffstat (limited to 'lib/scatterlist.c')
-rw-r--r--   lib/scatterlist.c | 281
1 file changed, 281 insertions, 0 deletions
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
new file mode 100644
index 000000000000..02aaa27e010e
--- /dev/null
+++ b/lib/scatterlist.c
@@ -0,0 +1,281 @@
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/scatterlist.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
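/*
 * Minimal usage sketch (guarded out, not built): walking a possibly chained
 * scatterlist one entry at a time with sg_next(). The helper name
 * dump_sg_lengths() is hypothetical.
 */
#if 0
static void dump_sg_lengths(struct scatterlist *sgl)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg))
		printk(KERN_DEBUG "sg entry: length %u offset %u\n",
		       sg->length, sg->offset);
}
#endif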

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually; it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one;
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
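/*
 * Small sketch (guarded out, not built) of a typical sg_last() caller, e.g.
 * trimming the final entry after building a list. trim_last_entry() and the
 * @trim amount are hypothetical; it assumes @trim is smaller than the last
 * entry's length.
 */
#if 0
static void trim_last_entry(struct scatterlist *sgl, unsigned int nents,
			    unsigned int trim)
{
	struct scatterlist *last = sg_last(sgl, nents);

	last->length -= trim;
}
#endif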

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
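/*
 * Usage sketch (guarded out, not built): describing two kernel-virtual
 * buffers with a caller-provided two-entry table. build_two_entry_list(),
 * buf_a/buf_b and their lengths are hypothetical.
 */
#if 0
static void build_two_entry_list(struct scatterlist sgl[2],
				 void *buf_a, unsigned int len_a,
				 void *buf_b, unsigned int len_b)
{
	sg_init_table(sgl, 2);		/* zero entries, mark sgl[1] as last */
	sg_set_buf(&sgl[0], buf_a, len_a);
	sg_set_buf(&sgl[1], buf_b, len_b);
}
#endif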

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
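/*
 * Sketch (guarded out, not built) of the common single-buffer case:
 * allocate one buffer and describe it with a single sg entry.
 * alloc_and_map_one() is a hypothetical helper name.
 */
#if 0
static void *alloc_and_map_one(struct scatterlist *sg, unsigned int len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (buf)
		sg_init_one(sg, buf, len);
	return buf;
}
#endif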

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		return (struct scatterlist *) __get_free_page(gfp_mask);
	else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		free_page((unsigned long) sg);
	else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than SG_MAX_SINGLE_ALLOC segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
			next = sg_chain_ptr(&sgl[SG_MAX_SINGLE_ALLOC - 1]);
			alloc_size = SG_MAX_SINGLE_ALLOC;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > SG_MAX_SINGLE_ALLOC);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
			alloc_size = SG_MAX_SINGLE_ALLOC;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg))
			return -ENOMEM;

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain the previous part.
		 */
		if (prv)
			sg_chain(prv, SG_MAX_SINGLE_ALLOC, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * Only really needed for mempool-backed sg allocations (like
		 * SCSI); a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
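/*
 * Sketch (guarded out, not built) of wiring a caller-supplied allocator pair
 * into __sg_alloc_table()/__sg_free_table(). The my_sg_alloc()/my_sg_free()
 * helpers are hypothetical and simply kmalloc-backed; a real user (e.g. SCSI)
 * would typically back them with a mempool or kmem_cache.
 */
#if 0
static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void my_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	kfree(sgl);
}

static int my_table_setup(struct sg_table *table, unsigned int nents)
{
	int ret = __sg_alloc_table(table, nents, GFP_KERNEL, my_sg_alloc);

	if (ret)
		__sg_free_table(table, my_sg_free);
	return ret;
}
#endif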

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
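/*
 * End-to-end sketch (guarded out, not built) for the default allocator path:
 * allocate a possibly chained table, describe one buffer per entry, then free
 * it. describe_buffers() and the bufs[]/lens[] arrays are hypothetical.
 */
#if 0
static int describe_buffers(struct sg_table *table, void **bufs,
			    unsigned int *lens, unsigned int nr)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(table, nr, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_buf(sg, bufs[i], lens[i]);

	/* ... use table->sgl / table->nents, e.g. for DMA mapping ... */

	sg_free_table(table);
	return 0;
}
#endif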