diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-17 23:24:12 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-18 16:12:57 -0400 |
commit | dcce284a259373f9e5570f2e33f79eca84fcf565 (patch) | |
tree | afc4b23208974f17c080ea3d2ecfbaca4254c010 /mm | |
parent | 9729a6eb5878a3daa18395f2b5fb38bf9359a761 (diff) |
mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot,
so this moves it out of slab/slub and uses it with the page allocator
as well.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 3 | ||||
-rw-r--r-- | mm/slab.c | 15 | ||||
-rw-r--r-- | mm/slub.c | 12 |
3 files changed, 6 insertions(+), 24 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a5f3c278c573..6f0753fe694c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly; | |||
73 | unsigned long totalreserve_pages __read_mostly; | 73 | unsigned long totalreserve_pages __read_mostly; |
74 | unsigned long highest_memmap_pfn __read_mostly; | 74 | unsigned long highest_memmap_pfn __read_mostly; |
75 | int percpu_pagelist_fraction; | 75 | int percpu_pagelist_fraction; |
76 | gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; | ||
76 | 77 | ||
77 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE | 78 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
78 | int pageblock_order __read_mostly; | 79 | int pageblock_order __read_mostly; |
@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |||
1863 | struct page *page; | 1864 | struct page *page; |
1864 | int migratetype = allocflags_to_migratetype(gfp_mask); | 1865 | int migratetype = allocflags_to_migratetype(gfp_mask); |
1865 | 1866 | ||
1867 | gfp_mask &= gfp_allowed_mask; | ||
1868 | |||
1866 | lockdep_trace_alloc(gfp_mask); | 1869 | lockdep_trace_alloc(gfp_mask); |
1867 | 1870 | ||
1868 | might_sleep_if(gfp_mask & __GFP_WAIT); | 1871 | might_sleep_if(gfp_mask & __GFP_WAIT); |
@@ -305,12 +305,6 @@ struct kmem_list3 { | |||
305 | }; | 305 | }; |
306 | 306 | ||
307 | /* | 307 | /* |
308 | * The slab allocator is initialized with interrupts disabled. Therefore, make | ||
309 | * sure early boot allocations don't accidentally enable interrupts. | ||
310 | */ | ||
311 | static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | ||
312 | |||
313 | /* | ||
314 | * Need this for bootstrapping a per node allocator. | 308 | * Need this for bootstrapping a per node allocator. |
315 | */ | 309 | */ |
316 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) | 310 | #define NUM_INIT_LISTS (3 * MAX_NUMNODES) |
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void) | |||
1559 | { | 1553 | { |
1560 | struct kmem_cache *cachep; | 1554 | struct kmem_cache *cachep; |
1561 | 1555 | ||
1562 | /* | ||
1563 | * Interrupts are enabled now so all GFP allocations are safe. | ||
1564 | */ | ||
1565 | slab_gfp_mask = __GFP_BITS_MASK; | ||
1566 | |||
1567 | /* 6) resize the head arrays to their final sizes */ | 1556 | /* 6) resize the head arrays to their final sizes */ |
1568 | mutex_lock(&cache_chain_mutex); | 1557 | mutex_lock(&cache_chain_mutex); |
1569 | list_for_each_entry(cachep, &cache_chain, next) | 1558 | list_for_each_entry(cachep, &cache_chain, next) |
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
3307 | unsigned long save_flags; | 3296 | unsigned long save_flags; |
3308 | void *ptr; | 3297 | void *ptr; |
3309 | 3298 | ||
3310 | flags &= slab_gfp_mask; | 3299 | flags &= gfp_allowed_mask; |
3311 | 3300 | ||
3312 | lockdep_trace_alloc(flags); | 3301 | lockdep_trace_alloc(flags); |
3313 | 3302 | ||
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | |||
3392 | unsigned long save_flags; | 3381 | unsigned long save_flags; |
3393 | void *objp; | 3382 | void *objp; |
3394 | 3383 | ||
3395 | flags &= slab_gfp_mask; | 3384 | flags &= gfp_allowed_mask; |
3396 | 3385 | ||
3397 | lockdep_trace_alloc(flags); | 3386 | lockdep_trace_alloc(flags); |
3398 | 3387 | ||
@@ -179,12 +179,6 @@ static enum { | |||
179 | SYSFS /* Sysfs up */ | 179 | SYSFS /* Sysfs up */ |
180 | } slab_state = DOWN; | 180 | } slab_state = DOWN; |
181 | 181 | ||
182 | /* | ||
183 | * The slab allocator is initialized with interrupts disabled. Therefore, make | ||
184 | * sure early boot allocations don't accidentally enable interrupts. | ||
185 | */ | ||
186 | static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; | ||
187 | |||
188 | /* A list of all slab caches on the system */ | 182 | /* A list of all slab caches on the system */ |
189 | static DECLARE_RWSEM(slub_lock); | 183 | static DECLARE_RWSEM(slub_lock); |
190 | static LIST_HEAD(slab_caches); | 184 | static LIST_HEAD(slab_caches); |
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1692 | unsigned long flags; | 1686 | unsigned long flags; |
1693 | unsigned int objsize; | 1687 | unsigned int objsize; |
1694 | 1688 | ||
1695 | gfpflags &= slab_gfp_mask; | 1689 | gfpflags &= gfp_allowed_mask; |
1696 | 1690 | ||
1697 | lockdep_trace_alloc(gfpflags); | 1691 | lockdep_trace_alloc(gfpflags); |
1698 | might_sleep_if(gfpflags & __GFP_WAIT); | 1692 | might_sleep_if(gfpflags & __GFP_WAIT); |
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void) | |||
3220 | 3214 | ||
3221 | void __init kmem_cache_init_late(void) | 3215 | void __init kmem_cache_init_late(void) |
3222 | { | 3216 | { |
3223 | /* | ||
3224 | * Interrupts are enabled now so all GFP allocations are safe. | ||
3225 | */ | ||
3226 | slab_gfp_mask = __GFP_BITS_MASK; | ||
3227 | } | 3217 | } |
3228 | 3218 | ||
3229 | /* | 3219 | /* |