Diffstat (limited to 'mm/slab.c')
 -rw-r--r--  mm/slab.c  117
 1 file changed, 75 insertions, 42 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f85831da9080..f46b65d124e5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/nodemask.h>
+#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/mutex.h>
 #include <linux/fault-inject.h>
@@ -178,13 +179,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #endif
 
 /*
@@ -315,7 +316,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +959,20 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
+	/*
+	 * The array_cache structures contain pointers to free objects.
+	 * However, when such objects are allocated or transferred to another
+	 * cache the pointers are not cleared and they could be counted as
+	 * valid references during a kmemleak scan. Therefore, kmemleak must
+	 * not scan such objects.
+	 */
+	kmemleak_no_scan(nc);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
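Aside (not part of the patch): kmemleak_no_scan() tolerates NULL and error pointers, which is why the hunk above can call it before the allocation is checked. A minimal sketch of the resulting allocation pattern, with the rest of alloc_arraycache() elided:

	/* Sketch only: allocate, exempt from scanning, then check. */
	struct array_cache *nc = kmalloc_node(memsize, gfp, node);

	kmemleak_no_scan(nc);	/* no-op when nc is NULL or an error pointer */
	if (nc) {
		/* ... initialise avail/limit/batchcount as in the hunk above ... */
	}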
@@ -1003,7 +1012,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1043,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1051,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
@@ -1282,20 +1291,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1399,10 +1408,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1419,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
1417/* 1424/*
@@ -1575,9 +1582,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1587,11 +1593,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1607,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1627,7 +1630,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -2064,10 +2067,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2089,7 +2092,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2156,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2172,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
@@ -2278,8 +2284,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
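Aside (not part of the patch): kmem_cache_create() can now run before the slab allocator is fully up, so the GFP mask has to be chosen at run time. A minimal sketch of the selection made in the hunk above; the helper name is hypothetical and only illustrates the pattern:

	/* Hypothetical helper, for illustration only. */
	static inline gfp_t choose_bootstrap_gfp(void)
	{
		/* Blocking allocations are not allowed during early boot. */
		return slab_is_available() ? GFP_KERNEL : GFP_NOWAIT;
	}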
@@ -2382,7 +2393,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2394,8 +2405,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2621,6 +2634,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
 					      local_flags, nodeid);
+		/*
+		 * If the first object in the slab is leaked (it's allocated
+		 * but no one has a reference to it), we want to make sure
+		 * kmemleak does not treat the ->s_mem pointer as a reference
+		 * to the object. Otherwise we will not report the leak.
+		 */
+		kmemleak_scan_area(slabp, offsetof(struct slab, list),
+				   sizeof(struct list_head), local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
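Aside (not part of the patch): the off-slab management object has roughly the layout below in this era of mm/slab.c; registering only the 'list' member as a kmemleak scan area keeps s_mem from being treated as a reference to the first object:

	/* Approximate 2.6-era layout, shown for illustration only. */
	struct slab {
		struct list_head list;	/* scanned: slab list linkage */
		unsigned long colouroff;
		void *s_mem;		/* not scanned: first object in the slab */
		unsigned int inuse;	/* number of objects in use */
		kmem_bufctl_t free;
		unsigned short nodeid;
	};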
@@ -3141,6 +3162,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	/*
+	 * To avoid a false negative, if an object that is in one of the
+	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+	 * treat the array pointers as a reference to the object.
+	 */
+	kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
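Aside (not part of the patch): kmemleak_erase() is needed because the fast path pops an object roughly as sketched below, leaving a stale copy of the pointer in the per-CPU array:

	/* Simplified sketch of the fast path earlier in ____cache_alloc(). */
	if (likely(ac->avail)) {
		STATS_INC_ALLOCHIT(cachep);
		ac->touched = 1;
		objp = ac->entry[--ac->avail];	/* entry[ac->avail] still holds objp */
	}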
@@ -3360,6 +3387,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+				 flags);
 
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
@@ -3415,6 +3444,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+				 flags);
 	prefetchw(objp);
 
 	if (unlikely((flags & __GFP_ZERO) && objp))
@@ -3530,6 +3561,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
 	/*
@@ -3802,7 +3834,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3812,7 +3844,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3821,7 +3853,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3850,7 +3882,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3906,18 +3938,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-			    int batchcount, int shared)
+			    int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3944,11 +3976,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
@@ -3994,7 +4026,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
@@ -4300,7 +4332,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-					       batchcount, shared);
+					       batchcount, shared,
+					       GFP_KERNEL);
 		}
 		break;
 	}