author		Nick Piggin <nickpiggin@yahoo.com.au>	2005-05-01 11:58:37 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-05-01 11:58:37 -0400
commit		20a77776c24800d1e40a73f520cfcb32239568a9 (patch)
tree		8a28cc68cf10b87d35b7603b2d6f26215390cc0f /mm/mempool.c
parent		b84a35be0285229b0a8a5e2e04d79360c5b75562 (diff)
[PATCH] mempool: simplify alloc
Mempool is pretty clever.  Looks too clever for its own good :)  It
shouldn't really know so much about page reclaim internals.

- don't guess about what effective page reclaim might involve.

- don't randomly flush out all dirty data if some unlikely thing happens
  (alloc returns NULL).  page reclaim can (sort of :P) handle it.

I think the main motivation is trying to avoid pool->lock at all costs.
However the first allocation is attempted with __GFP_WAIT cleared, so it
will be 'can_try_harder' if it hits the page allocator.  So if allocation
still fails, then we can probably afford to hit the pool->lock - and
what's the alternative?  Try page reclaim and hit zone->lru_lock?

A nice upshot is that we don't need to do any fancy memory barriers or
do (intentionally) racy access to pool-> fields outside the lock.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
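[Editor's note: for orientation, a condensed sketch of mempool_alloc() as the
patch leaves it.  This is not the literal patched source - the wait-queue
handling and the __nocast annotation are abbreviated - but it shows the
three-stage flow the message argues for.]

	void *mempool_alloc(mempool_t *pool, unsigned int gfp_mask)
	{
		void *element;
		unsigned long flags;
		int gfp_temp;

		gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/* Pass 1: __GFP_WAIT and __GFP_IO cleared, so the underlying
		 * allocator may 'try harder' but can never enter page reclaim. */
		gfp_temp = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

	repeat_alloc:
		element = pool->alloc(gfp_temp, pool->pool_data);
		if (element)
			return element;

		/* Pass 2: fall back to the pool's reserve.  Taking pool->lock
		 * here beats guessing at reclaim behaviour outside the lock. */
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			return element;
		}
		spin_unlock_irqrestore(&pool->lock, flags);

		if (!(gfp_mask & __GFP_WAIT))
			return NULL;

		/* Pass 3: restore the full mask so page reclaim may run, sleep
		 * until an element is freed back to the pool, then retry. */
		gfp_temp = gfp_mask;
		/* ... wait on pool->wait ... */
		goto repeat_alloc;
	}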
Diffstat (limited to 'mm/mempool.c')
-rw-r--r--	mm/mempool.c	30
1 files changed, 9 insertions, 21 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index d691b5cb8022..e9a0a6337b21 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -198,36 +198,22 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
 	void *element;
 	unsigned long flags;
 	DEFINE_WAIT(wait);
-	int gfp_nowait;
+	int gfp_temp;
+
+	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
 	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
 	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
-	gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
+
 repeat_alloc:
-	element = pool->alloc(gfp_nowait, pool->pool_data);
+
+	element = pool->alloc(gfp_temp, pool->pool_data);
 	if (likely(element != NULL))
 		return element;
 
-	/*
-	 * If the pool is less than 50% full and we can perform effective
-	 * page reclaim then try harder to allocate an element.
-	 */
-	mb();
-	if ((gfp_mask & __GFP_FS) && (gfp_mask != gfp_nowait) &&
-			(pool->curr_nr <= pool->min_nr/2)) {
-		element = pool->alloc(gfp_mask, pool->pool_data);
-		if (likely(element != NULL))
-			return element;
-	}
-
-	/*
-	 * Kick the VM at this point.
-	 */
-	wakeup_bdflush(0);
-
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
 		element = remove_element(pool);
@@ -240,6 +226,8 @@ repeat_alloc:
 	if (!(gfp_mask & __GFP_WAIT))
 		return NULL;
 
+	/* Now start performing page reclaim */
+	gfp_temp = gfp_mask;
 	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
 	mb();
 	if (!pool->curr_nr)
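
[Editor's note: for context, a minimal caller-side sketch of the mempool API
this function implements, assuming 2.6.12-era signatures.  The names io_cache,
io_pool, io_pool_setup and io_submit_example are illustrative, not from the
kernel source.]

	#include <linux/mempool.h>
	#include <linux/slab.h>

	static kmem_cache_t *io_cache;	/* illustrative names */
	static mempool_t *io_pool;

	static int io_pool_setup(void)
	{
		io_cache = kmem_cache_create("io_cache", 256, 0, 0, NULL, NULL);
		if (!io_cache)
			return -ENOMEM;
		/* Keep 4 pre-allocated elements in reserve; mempool_alloc()
		 * falls back to these when pool->alloc() fails. */
		io_pool = mempool_create(4, mempool_alloc_slab,
					 mempool_free_slab, io_cache);
		if (!io_pool) {
			kmem_cache_destroy(io_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static void io_submit_example(void)
	{
		/* With __GFP_WAIT set in the mask, this cannot fail: it
		 * sleeps until an element is freed back to the pool. */
		void *buf = mempool_alloc(io_pool, GFP_NOIO);
		/* ... use buf for the I/O ... */
		mempool_free(buf, io_pool);
	}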