Diffstat (limited to 'lib/flex_array.c')

-rw-r--r--   lib/flex_array.c   160
1 file changed, 110 insertions, 50 deletions
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 08f1636d296a..66eef2e4483e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -28,23 +28,6 @@ struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
 };
 
-static inline int __elements_per_part(int element_size)
-{
-	return FLEX_ARRAY_PART_SIZE / element_size;
-}
-
-static inline int bytes_left_in_base(void)
-{
-	int element_offset = offsetof(struct flex_array, parts);
-	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
-	return bytes_left;
-}
-
-static inline int nr_base_part_ptrs(void)
-{
-	return bytes_left_in_base() / sizeof(struct flex_array_part *);
-}
-
 /*
  * If a user requests an allocation which is small
  * enough, we may simply use the space in the
@@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void)
 static inline int elements_fit_in_base(struct flex_array *fa)
 {
 	int data_size = fa->element_size * fa->total_nr_elements;
-	if (data_size <= bytes_left_in_base())
+	if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
 		return 1;
 	return 0;
 }
@@ -63,6 +46,7 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * flex_array_alloc - allocate a new flexible array
  * @element_size:	the size of individual elements in the array
  * @total:	total number of elements that this should hold
+ * @flags:	page allocation flags to use for base array
  *
  * Note: all locking must be provided by the caller.
  *
@@ -99,10 +83,12 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure.  Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
 {
 	struct flex_array *ret;
-	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+	int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+				FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
 
 	/* max_size will end up 0 if element_size > PAGE_SIZE */
 	if (total > max_size)
@@ -112,19 +98,21 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
 		return NULL;
 	ret->element_size = element_size;
 	ret->total_nr_elements = total;
+	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+		memset(ret->parts[0], FLEX_ARRAY_FREE,
+						FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr / __elements_per_part(fa->element_size);
+	return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
- * @src:	address of data to copy into the array
- * @element_nr:	index of the position in which to insert
- *		the new element.
+ * @fa:	the flex array from which to free parts
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -132,11 +120,10 @@ static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
 void flex_array_free_parts(struct flex_array *fa)
 {
 	int part_nr;
-	int max_part = nr_base_part_ptrs();
 
 	if (elements_fit_in_base(fa))
 		return;
-	for (part_nr = 0; part_nr < max_part; part_nr++)
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
 
@@ -146,14 +133,13 @@ void flex_array_free(struct flex_array *fa)
 	kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr % __elements_per_part(fa->element_size);
-}
+	unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-	int part_offset = fa_index_inside_part(fa, element_nr);
+	part_offset = element_nr %
+				FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -162,15 +148,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 {
 	struct flex_array_part *part = fa->parts[part_nr];
 	if (!part) {
-		/*
-		 * This leaves the part pages uninitialized
-		 * and with potentially random data, just
-		 * as if the user had kmalloc()'d the whole.
-		 * __GFP_ZERO can be used to zero it.
-		 */
-		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		part = kmalloc(sizeof(struct flex_array_part), flags);
 		if (!part)
 			return NULL;
+		if (!(flags & __GFP_ZERO))
+			memset(part, FLEX_ARRAY_FREE,
+				sizeof(struct flex_array_part));
 		fa->parts[part_nr] = part;
 	}
 	return part;
@@ -178,9 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 
 /**
  * flex_array_put - copy data into the array at @element_nr
- * @src:	address of data to copy into the array
+ * @fa:	the flex array to copy data into
  * @element_nr:	index of the position in which to insert
  *		the new element.
+ * @src:	address of data to copy into the array
+ * @flags:	page allocation flags to use for array expansion
+ *
  *
  * Note that this *copies* the contents of @src into
  * the array.  If you are trying to store an array of
@@ -188,7 +174,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 *
 * Locking must be provided by the caller.
 */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
@@ -198,19 +185,49 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 		return -ENOSPC;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = __fa_get_part(fa, part_nr, flags);
 		if (!part)
 			return -ENOMEM;
+	}
 	dst = &part->elements[index_inside_part(fa, element_nr)];
 	memcpy(dst, src, fa->element_size);
 	return 0;
 }
 
 /**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa:	the flex array of the element.
+ * @element_nr:	index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return -EINVAL;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+	return 0;
+}
+
+/**
  * flex_array_prealloc - guarantee that array space exists
+ * @fa:	the flex array for which to preallocate parts
 * @start:	index of first array element for which space is allocated
 * @end:	index of last (inclusive) element for which space is allocated
+ * @flags:	page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory.  It can be used if you are expecting to
@@ -219,7 +236,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 *
 * Locking must be provided by the caller.
 */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
 {
 	int start_part;
 	int end_part;
@@ -242,6 +260,7 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
 
 /**
  * flex_array_get - pull data back out of the array
+ * @fa:	the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr.  Note
@@ -250,18 +269,59 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
 *
 * Locking must be provided by the caller.
 */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
 
 	if (element_nr >= fa->total_nr_elements)
 		return NULL;
-	if (!fa->parts[part_nr])
-		return NULL;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+
+static int part_is_free(struct flex_array_part *part)
+{
+	int i;
+
+	for (i = 0; i < sizeof(struct flex_array_part); i++)
+		if (part->elements[i] != FLEX_ARRAY_FREE)
+			return 0;
+	return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa:	the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements.  Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+	struct flex_array_part *part;
+	int part_nr;
+	int ret = 0;
+
+	if (elements_fit_in_base(fa))
+		return ret;
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+		part = fa->parts[part_nr];
+		if (!part)
+			continue;
+		if (part_is_free(part)) {
+			fa->parts[part_nr] = NULL;
+			kfree(part);
+			ret++;
+		}
+	}
+	return ret;
+}
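
For context: the diff drops the old inline helpers (__elements_per_part(), bytes_left_in_base(), nr_base_part_ptrs()) in favor of macros that moved into the header. Below is a rough sketch of those definitions as they stood around this change, reconstructed from memory for illustration; FLEX_ARRAY_FREE actually lives in include/linux/poison.h, and the headers in the matching tree are the authoritative source.

/*
 * Sketch of the macros this diff starts using, approximately as in
 * include/linux/flex_array.h and include/linux/poison.h of this era.
 * Reconstructed for illustration; not copied from this exact tree.
 */
#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE

/* bytes of the base page left after the struct flex_array bookkeeping */
#define FLEX_ARRAY_BASE_BYTES_LEFT					\
	(FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))

/* number of struct flex_array_part pointers that fit in the base page */
#define FLEX_ARRAY_NR_BASE_PTRS					\
	(FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))

/* number of elements of a given size that fit in one part page */
#define FLEX_ARRAY_ELEMENTS_PER_PART(size)				\
	(FLEX_ARRAY_PART_SIZE / size)

/* poison byte written into unused elements (include/linux/poison.h) */
#define FLEX_ARRAY_FREE	0x6c	/* for use-after-free poisoning */

These are drop-in replacements for the removed helpers, which is why lib/flex_array.c can delete them wholesale and also why the new shrink code can recognize free elements at all: every slot that is not holding user data is filled with the FLEX_ARRAY_FREE byte.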
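The new entry points are easiest to see together. Below is a hypothetical caller (struct mydata and example() are invented for illustration, not part of this patch) showing the intended flow: flex_array_clear() re-poisons an element with FLEX_ARRAY_FREE, so a later flex_array_shrink() can detect part pages made up entirely of unused elements and free them. As the kerneldoc repeats, all locking is the caller's responsibility.

#include <linux/flex_array.h>
#include <linux/kernel.h>

/* Hypothetical element type, for illustration only. */
struct mydata {
	int a;
	int b;
};

static int example(void)
{
	struct flex_array *fa;
	struct mydata item = { .a = 1, .b = 2 };
	unsigned int i;
	int nr_freed;

	/* room for 1000 elements; too big for the base, so parts are used */
	fa = flex_array_alloc(sizeof(struct mydata), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* part pages are allocated on demand as elements are stored */
	for (i = 0; i < 1000; i++) {
		int err = flex_array_put(fa, i, &item, GFP_KERNEL);
		if (err) {
			flex_array_free(fa);
			return err;
		}
	}

	/* mark every element unused again (re-poisons each slot)... */
	for (i = 0; i < 1000; i++)
		flex_array_clear(fa, i);

	/* ...so the now entirely-free part pages can be reclaimed */
	nr_freed = flex_array_shrink(fa);
	pr_debug("flex_array_shrink() freed %d part pages\n", nr_freed);

	flex_array_free(fa);
	return 0;
}

Note the interaction with __GFP_ZERO visible in the diff: parts allocated with __GFP_ZERO are zero-filled rather than poisoned, so flex_array_shrink() will only reclaim such a page once every element in it has been explicitly cleared with flex_array_clear().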