author    | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
commit    | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /mm/mempool.c
parent    | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'mm/mempool.c')
-rw-r--r-- | mm/mempool.c | 118
1 file changed, 40 insertions, 78 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index 54990476c04..1a3bc3d4d55 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -10,7 +10,7 @@
 
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
@@ -27,15 +27,7 @@ static void *remove_element(mempool_t *pool)
 	return pool->elements[--pool->curr_nr];
 }
 
-/**
- * mempool_destroy - deallocate a memory pool
- * @pool: pointer to the memory pool which was allocated via
- *        mempool_create().
- *
- * Free all reserved elements in @pool and @pool itself. This function
- * only sleeps if the free_fn() function sleeps.
- */
-void mempool_destroy(mempool_t *pool)
+static void free_pool(mempool_t *pool)
 {
 	while (pool->curr_nr) {
 		void *element = remove_element(pool);
@@ -44,7 +36,6 @@ void mempool_destroy(mempool_t *pool)
 	kfree(pool->elements);
 	kfree(pool);
 }
-EXPORT_SYMBOL(mempool_destroy);
 
 /**
  * mempool_create - create a memory pool
@@ -63,21 +54,19 @@ EXPORT_SYMBOL(mempool_destroy);
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data)
 {
-	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
-				   GFP_KERNEL, NUMA_NO_NODE);
+	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
 }
 EXPORT_SYMBOL(mempool_create);
 
 mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data,
-			gfp_t gfp_mask, int node_id)
+			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
-				      gfp_mask, node_id);
+				      GFP_KERNEL, node_id);
 	if (!pool->elements) {
 		kfree(pool);
 		return NULL;
@@ -95,9 +84,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 	while (pool->curr_nr < pool->min_nr) {
 		void *element;
 
-		element = pool->alloc(gfp_mask, pool->pool_data);
+		element = pool->alloc(GFP_KERNEL, pool->pool_data);
 		if (unlikely(!element)) {
-			mempool_destroy(pool);
+			free_pool(pool);
 			return NULL;
 		}
 		add_element(pool, element);
@@ -183,6 +172,23 @@ out:
 EXPORT_SYMBOL(mempool_resize);
 
 /**
+ * mempool_destroy - deallocate a memory pool
+ * @pool: pointer to the memory pool which was allocated via
+ *        mempool_create().
+ *
+ * this function only sleeps if the free_fn() function sleeps. The caller
+ * has to guarantee that all elements have been returned to the pool (ie:
+ * freed) prior to calling mempool_destroy().
+ */
+void mempool_destroy(mempool_t *pool)
+{
+	/* Check for outstanding elements */
+	BUG_ON(pool->curr_nr != pool->min_nr);
+	free_pool(pool);
+}
+EXPORT_SYMBOL(mempool_destroy);
+
+/**
  * mempool_alloc - allocate an element from a specific memory pool
  * @pool: pointer to the memory pool which was allocated via
  *        mempool_create().
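With this hunk, mempool_destroy() gains a BUG_ON() guard: the pool must hold exactly min_nr elements again when it is torn down, i.e. every mempool_alloc() must have been balanced by a mempool_free() first. A minimal caller sketch of that contract (the helper name and `my_pool` are illustrative, not part of the patch):

```c
#include <linux/gfp.h>
#include <linux/mempool.h>

/* Hypothetical helper: shows the alloc/free balance mempool_destroy()
 * now insists on via BUG_ON(). */
static void teardown_example(mempool_t *my_pool)
{
	void *elem = mempool_alloc(my_pool, GFP_KERNEL);

	if (elem) {
		/* ... use elem ... */
		mempool_free(elem, my_pool);	/* back to the pool first */
	}
	/* curr_nr == min_nr again, so the BUG_ON() above is satisfied */
	mempool_destroy(my_pool);
}
```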
@@ -218,40 +224,28 @@ repeat_alloc:
 	if (likely(pool->curr_nr)) {
 		element = remove_element(pool);
 		spin_unlock_irqrestore(&pool->lock, flags);
-		/* paired with rmb in mempool_free(), read comment there */
-		smp_wmb();
 		return element;
 	}
+	spin_unlock_irqrestore(&pool->lock, flags);
 
-	/*
-	 * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
-	 * alloc failed with that and @pool was empty, retry immediately.
-	 */
-	if (gfp_temp != gfp_mask) {
-		spin_unlock_irqrestore(&pool->lock, flags);
-		gfp_temp = gfp_mask;
-		goto repeat_alloc;
-	}
-
-	/* We must not sleep if !__GFP_WAIT */
-	if (!(gfp_mask & __GFP_WAIT)) {
-		spin_unlock_irqrestore(&pool->lock, flags);
+	/* We must not sleep in the GFP_ATOMIC case */
+	if (!(gfp_mask & __GFP_WAIT))
 		return NULL;
-	}
 
-	/* Let's wait for someone else to return an element to @pool */
+	/* Now start performing page reclaim */
+	gfp_temp = gfp_mask;
 	init_wait(&wait);
 	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&pool->lock, flags);
-
-	/*
-	 * FIXME: this should be io_schedule(). The timeout is there as a
-	 * workaround for some DM problems in 2.6.18.
-	 */
-	io_schedule_timeout(5*HZ);
-
+	smp_mb();
+	if (!pool->curr_nr) {
+		/*
+		 * FIXME: this should be io_schedule(). The timeout is there
+		 * as a workaround for some DM problems in 2.6.18.
+		 */
+		io_schedule_timeout(5*HZ);
+	}
 	finish_wait(&pool->wait, &wait);
+
 	goto repeat_alloc;
 }
 EXPORT_SYMBOL(mempool_alloc);
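This hunk restores the simpler waiting scheme: when the gfp mask lacks __GFP_WAIT (as with GFP_ATOMIC), mempool_alloc() returns NULL as soon as the reserve is empty, with no second pass through the allocator under a relaxed mask. A hedged sketch of what that means for an atomic-context caller (helper name invented for illustration):

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>

/* Hypothetical helper, not from this patch. */
static int grab_element_atomic(mempool_t *pool, void **out)
{
	/* GFP_ATOMIC has no __GFP_WAIT bit, so this path cannot sleep
	 * and fails outright once pool->curr_nr hits zero. */
	void *elem = mempool_alloc(pool, GFP_ATOMIC);

	if (!elem)
		return -ENOMEM;	/* reserve exhausted; caller must back off */
	*out = elem;
	return 0;
}
```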
@@ -271,39 +265,7 @@ void mempool_free(void *element, mempool_t *pool)
 	if (unlikely(element == NULL))
 		return;
 
-	/*
-	 * Paired with the wmb in mempool_alloc(). The preceding read is
-	 * for @element and the following @pool->curr_nr. This ensures
-	 * that the visible value of @pool->curr_nr is from after the
-	 * allocation of @element. This is necessary for fringe cases
-	 * where @element was passed to this task without going through
-	 * barriers.
-	 *
-	 * For example, assume @p is %NULL at the beginning and one task
-	 * performs "p = mempool_alloc(...);" while another task is doing
-	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
-	 * may end up using curr_nr value which is from before allocation
-	 * of @p without the following rmb.
-	 */
-	smp_rmb();
-
-	/*
-	 * For correctness, we need a test which is guaranteed to trigger
-	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
-	 * without locking achieves that and refilling as soon as possible
-	 * is desirable.
-	 *
-	 * Because curr_nr visible here is always a value after the
-	 * allocation of @element, any task which decremented curr_nr below
-	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
-	 * incremented to min_nr afterwards. If curr_nr gets incremented
-	 * to min_nr after the allocation of @element, the elements
-	 * allocated after that are subject to the same guarantee.
-	 *
-	 * Waiters happen iff curr_nr is 0 and the above guarantee also
-	 * ensures that there will be frees which return elements to the
-	 * pool waking up the waiters.
-	 */
+	smp_mb();
 	if (pool->curr_nr < pool->min_nr) {
 		spin_lock_irqsave(&pool->lock, flags);
 		if (pool->curr_nr < pool->min_nr) {
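For reference, a self-contained usage sketch against the post-patch signatures, in particular mempool_create_node() taking a plain node_id of -1 instead of a gfp_t plus NUMA_NO_NODE (the module, cache name, object size, and MY_MIN_NR are invented for illustration; mempool_alloc_slab() and mempool_free_slab() are the stock slab adapters):

```c
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

#define MY_MIN_NR 4			/* elements held in reserve */

static struct kmem_cache *my_cache;	/* hypothetical backing cache */
static mempool_t *my_pool;

static int __init my_pool_init(void)
{
	my_cache = kmem_cache_create("my_cache", 256, 0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	/* Post-patch signature: node_id replaces the gfp_t argument;
	 * -1 means no NUMA node preference. */
	my_pool = mempool_create_node(MY_MIN_NR, mempool_alloc_slab,
				      mempool_free_slab, my_cache, -1);
	if (!my_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit my_pool_exit(void)
{
	mempool_destroy(my_pool);	/* all elements already freed back */
	kmem_cache_destroy(my_cache);
}

module_init(my_pool_init);
module_exit(my_pool_exit);
MODULE_LICENSE("GPL");
```

The exit path relies on every element having been returned to the pool beforehand, matching the BUG_ON() check this patch adds to mempool_destroy().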