author	Nick Piggin <nickpiggin@yahoo.com.au>	2005-05-01 11:58:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-05-01 11:58:36 -0400
commit	b84a35be0285229b0a8a5e2e04d79360c5b75562 (patch)
tree	3ff63fde8534eb615b408d047b461015781f6a5b
parent	8e30f272a93ec9c1d5c305c5040dfaebc880499d (diff)
[PATCH] mempool: NOMEMALLOC and NORETRY
Mempools have two problems.

The first is that mempool_alloc() can get stuck in __alloc_pages() when it should instead fail and take an element from the mempool's reserved pool.

The second is that it will happily eat the emergency PF_MEMALLOC reserves instead of falling back to the mempool's reserved pool.

Fix the first by passing __GFP_NORETRY in the allocation calls in mempool_alloc(). Fix the second by introducing a __GFP_NOMEMALLOC flag which directs the page allocator not to allocate from the emergency reserves.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
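For readers unfamiliar with mempools, a minimal sketch of a typical user of this era follows. The io_req names, the reserve size of 16, and the init function are illustrative only, not taken from this patch:

/* Illustrative mempool user; "io_req" and the reserve size are made up. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct io_req {
	struct list_head list;		/* hypothetical driver-private state */
};

static kmem_cache_t *io_req_cache;
static mempool_t *io_req_pool;

static int __init io_req_pool_init(void)
{
	io_req_cache = kmem_cache_create("io_req", sizeof(struct io_req),
					 0, 0, NULL, NULL);
	if (!io_req_cache)
		return -ENOMEM;

	/* Keep 16 elements in reserve so writeout can make progress
	 * even when the page allocator is under heavy pressure. */
	io_req_pool = mempool_create(16, mempool_alloc_slab,
				     mempool_free_slab, io_req_cache);
	if (!io_req_pool) {
		kmem_cache_destroy(io_req_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct io_req *get_io_req(void)
{
	/* With __GFP_WAIT set this does not fail: if the underlying slab
	 * allocation fails, mempool_alloc() hands out a reserved element
	 * (or sleeps until one is returned via mempool_free()). */
	return mempool_alloc(io_req_pool, GFP_NOIO);
}

GFP_NOIO includes __GFP_WAIT, so the allocation may sleep but ultimately succeeds; the point of this patch is to make the slab attempt inside mempool_alloc() give up quickly and stay out of the emergency reserves, so that the reserved-element fallback is actually exercised.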
-rw-r--r--	include/linux/gfp.h	6
-rw-r--r--	mm/mempool.c	9
-rw-r--r--	mm/page_alloc.c	20
3 files changed, 23 insertions(+), 12 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 848a1baac079..af7407e8cfc5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -38,14 +38,16 @@ struct vm_area_struct;
 #define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
 #define __GFP_COMP	0x4000u	/* Add compound page metadata */
 #define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
+#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
 
-#define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
 
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP)
+			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+			__GFP_NOMEMALLOC)
 
 #define GFP_ATOMIC	(__GFP_HIGH)
 #define GFP_NOIO	(__GFP_WAIT)
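The new flag is not mempool-specific. As a hedged illustration (not part of this patch, helper name invented), a caller that keeps its own private reserve could combine the flags the same way mempool_alloc() now does:

/* Illustration only: prefer failing fast over draining the PF_MEMALLOC
 * emergency reserves; on failure the caller falls back to a private
 * reserve, as mempool_alloc() does with its reserved elements. */
#include <linux/gfp.h>

static struct page *grab_page_no_emergency(void)
{
	return alloc_pages(GFP_NOIO | __GFP_NORETRY |
			   __GFP_NOMEMALLOC | __GFP_NOWARN, 0);
}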
diff --git a/mm/mempool.c b/mm/mempool.c
index b014ffeaa413..d691b5cb8022 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -198,11 +198,16 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
 	void *element;
 	unsigned long flags;
 	DEFINE_WAIT(wait);
-	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+	int gfp_nowait;
+
+	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
+	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
+	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
+	gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 repeat_alloc:
-	element = pool->alloc(gfp_nowait|__GFP_NOWARN, pool->pool_data);
+	element = pool->alloc(gfp_nowait, pool->pool_data);
 	if (likely(element != NULL))
 		return element;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08e8627361a0..04a35b3d3262 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -799,14 +799,18 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
 	}
 
 	/* This allocation should allow future memory freeing. */
-	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) {
-		/* go through the zonelist yet again, ignoring mins */
-		for (i = 0; (z = zones[i]) != NULL; i++) {
-			if (!cpuset_zone_allowed(z))
-				continue;
-			page = buffered_rmqueue(z, order, gfp_mask);
-			if (page)
-				goto got_pg;
+
+	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
+			&& !in_interrupt()) {
+		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+			/* go through the zonelist yet again, ignoring mins */
+			for (i = 0; (z = zones[i]) != NULL; i++) {
+				if (!cpuset_zone_allowed(z))
+					continue;
+				page = buffered_rmqueue(z, order, gfp_mask);
+				if (page)
+					goto got_pg;
+			}
 		}
 		goto nopage;
 	}