author     Frederic Weisbecker <fweisbec@gmail.com>    2009-12-07 01:28:35 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>    2009-12-07 01:29:22 -0500
commit     6548698f929814375fa5d62ae1db96959b0418c1 (patch)
tree       340924ae82cb0946aa15045b2b72186de52a8146 /lib/flex_array.c
parent     1d2c6cfd40b2dece3bb958cbbc405a2c1536ab75 (diff)
parent     22763c5cf3690a681551162c15d34d935308c8d7 (diff)
Merge commit 'v2.6.32' into reiserfs/kill-bkl
Merge-reason: The tree was based on 2.6.31. It's better to be up to date
with 2.6.32. Although no conflicting changes were made in between,
it gives benchmarking results closer to the latest kernel behaviour.
Diffstat (limited to 'lib/flex_array.c')
-rw-r--r--   lib/flex_array.c   121
1 file changed, 90 insertions(+), 31 deletions(-)
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 7baed2fc3bc8..66eef2e4483e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -28,23 +28,6 @@ struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
 };
 
-static inline int __elements_per_part(int element_size)
-{
-	return FLEX_ARRAY_PART_SIZE / element_size;
-}
-
-static inline int bytes_left_in_base(void)
-{
-	int element_offset = offsetof(struct flex_array, parts);
-	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
-	return bytes_left;
-}
-
-static inline int nr_base_part_ptrs(void)
-{
-	return bytes_left_in_base() / sizeof(struct flex_array_part *);
-}
-
 /*
  * If a user requests an allocation which is small
  * enough, we may simply use the space in the
@@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void)
 static inline int elements_fit_in_base(struct flex_array *fa)
 {
 	int data_size = fa->element_size * fa->total_nr_elements;
-	if (data_size <= bytes_left_in_base())
+	if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
 		return 1;
 	return 0;
 }
@@ -63,6 +46,7 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * flex_array_alloc - allocate a new flexible array
  * @element_size: the size of individual elements in the array
  * @total: total number of elements that this should hold
+ * @flags: page allocation flags to use for base array
  *
  * Note: all locking must be provided by the caller.
  *
@@ -103,7 +87,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 					gfp_t flags)
 {
 	struct flex_array *ret;
-	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+	int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+				FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
 
 	/* max_size will end up 0 if element_size > PAGE_SIZE */
 	if (total > max_size)
@@ -113,17 +98,21 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 		return NULL;
 	ret->element_size = element_size;
 	ret->total_nr_elements = total;
+	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+		memset(ret->parts[0], FLEX_ARRAY_FREE,
+					FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
 
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
 {
-	return element_nr / __elements_per_part(fa->element_size);
+	return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
+ * @fa: the flex array from which to free parts
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -131,11 +120,10 @@ static int fa_element_to_part_nr(struct flex_array *fa,
 void flex_array_free_parts(struct flex_array *fa)
 {
 	int part_nr;
-	int max_part = nr_base_part_ptrs();
 
 	if (elements_fit_in_base(fa))
 		return;
-	for (part_nr = 0; part_nr < max_part; part_nr++)
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
 
@@ -150,7 +138,8 @@ static unsigned int index_inside_part(struct flex_array *fa,
 {
 	unsigned int part_offset;
 
-	part_offset = element_nr % __elements_per_part(fa->element_size);
+	part_offset = element_nr %
+				FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -159,15 +148,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 {
 	struct flex_array_part *part = fa->parts[part_nr];
 	if (!part) {
-		/*
-		 * This leaves the part pages uninitialized
-		 * and with potentially random data, just
-		 * as if the user had kmalloc()'d the whole.
-		 * __GFP_ZERO can be used to zero it.
-		 */
-		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		part = kmalloc(sizeof(struct flex_array_part), flags);
 		if (!part)
 			return NULL;
+		if (!(flags & __GFP_ZERO))
+			memset(part, FLEX_ARRAY_FREE,
+				sizeof(struct flex_array_part));
 		fa->parts[part_nr] = part;
 	}
 	return part;
@@ -175,9 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 
 /**
  * flex_array_put - copy data into the array at @element_nr
- * @src: address of data to copy into the array
+ * @fa: the flex array to copy data into
  * @element_nr: index of the position in which to insert
  *		the new element.
+ * @src: address of data to copy into the array
+ * @flags: page allocation flags to use for array expansion
+ *
  *
  * Note that this *copies* the contents of @src into
  * the array. If you are trying to store an array of
@@ -207,9 +196,38 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
 }
 
 /**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa: the flex array of the element.
+ * @element_nr: index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return -EINVAL;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+	return 0;
+}
+
+/**
  * flex_array_prealloc - guarantee that array space exists
+ * @fa: the flex array for which to preallocate parts
  * @start: index of first array element for which space is allocated
  * @end: index of last (inclusive) element for which space is allocated
+ * @flags: page allocation flags
  *
  * This will guarantee that no future calls to flex_array_put()
  * will allocate memory. It can be used if you are expecting to
@@ -242,6 +260,7 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
 
 /**
  * flex_array_get - pull data back out of the array
+ * @fa: the flex array from which to extract data
  * @element_nr: index of the element to fetch from the array
  *
  * Returns a pointer to the data at index @element_nr. Note
@@ -266,3 +285,43 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+
+static int part_is_free(struct flex_array_part *part)
+{
+	int i;
+
+	for (i = 0; i < sizeof(struct flex_array_part); i++)
+		if (part->elements[i] != FLEX_ARRAY_FREE)
+			return 0;
+	return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa: the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements. Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+	struct flex_array_part *part;
+	int part_nr;
+	int ret = 0;
+
+	if (elements_fit_in_base(fa))
+		return ret;
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+		part = fa->parts[part_nr];
+		if (!part)
+			continue;
+		if (part_is_free(part)) {
+			fa->parts[part_nr] = NULL;
+			kfree(part);
+			ret++;
+		}
+	}
+	return ret;
+}