Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------
 1 file changed, 45 insertions(+), 40 deletions(-)
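
This patch threads a gfp_t argument through the slab allocator's internal
setup helpers (alloc_arraycache(), alloc_alien_cache(), setup_cpu_cache(),
alloc_kmemlist(), do_tune_cpucache(), enable_cpucache()) instead of
hard-coding GFP_KERNEL. Boot-time callers pass GFP_NOWAIT, which never
sleeps, so these paths become safe to run before the page allocator and
scheduler are fully up; runtime callers keep GFP_KERNEL. A minimal sketch
of the idiom (the patch open-codes this test in kmem_cache_create();
slab_gfp() here is a hypothetical helper, not part of the patch):

	/* Pick an allocation mask based on how far boot has progressed:
	 * GFP_NOWAIT never sleeps, GFP_KERNEL may block. */
	static gfp_t slab_gfp(void)
	{
		return slab_is_available() ? GFP_KERNEL : GFP_NOWAIT;
	}
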
@@ -315,7 +315,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +958,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -1003,7 +1003,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1034,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1042,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
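
Both definitions of alloc_alien_cache() gain the gfp parameter: the
!CONFIG_NUMA stub earlier, which returns BAD_ALIEN_MAGIC, and the real
CONFIG_NUMA version here, keeping the two signatures in sync. The
0xbaadf00d batchcount poison passed for alien caches is pre-existing and
unchanged by this patch.
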
@@ -1282,20 +1282,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1399,10 +1399,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1410,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
 /*
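
With GFP_KERNEL, the bootstrap allocation in init_list() could only be
made with interrupts enabled, so the code re-disabled them just for the
copy and list fixup; GFP_NOWAIT never sleeps and is safe in this context,
which is presumably why the local_irq_disable()/local_irq_enable()
bracketing can be dropped along with the flag change.
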
@@ -1575,9 +1573,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1587,11 +1584,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1598,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1627,7 +1621,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
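
The same pattern repeats through kmem_cache_init(): the bootstrap
kmalloc() calls switch from GFP_KERNEL to GFP_NOWAIT, the interrupt
toggling around the array-cache copies goes away, and the final
enable_cpucache() pass over the cache chain explicitly requests
GFP_NOWAIT, since this code still runs during early boot.
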
@@ -2064,10 +2058,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2089,7 +2083,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2163,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
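
kmem_cache_create() now takes get_online_cpus() and cache_chain_mutex only
once slab_is_available(). Before that point only the boot CPU is running,
so there is no concurrent cache creation or CPU hotplug to serialize
against, and sleeping on a mutex that early would be unsafe anyway. The
matching conditional unlock appears in the oops: path further down.
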
@@ -2278,8 +2275,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
@@ -2382,7 +2384,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2394,8 +2396,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
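
Taken together, these changes let kmem_cache_create() run before the slab
is fully initialized. A hypothetical early-boot caller, for illustration
only (early_obj and early_caches_init() are not from the patch):

	struct early_obj {
		int id;
	};

	static struct kmem_cache *early_cache;

	void __init early_caches_init(void)
	{
		/* Before slab_is_available(), kmem_cache_create() now
		 * allocates with GFP_NOWAIT and skips the cpu-hotplug
		 * and cache-chain locking shown above. */
		early_cache = kmem_cache_create("early_obj",
						sizeof(struct early_obj),
						0, SLAB_PANIC, NULL);
	}
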
@@ -3802,7 +3806,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3812,7 +3816,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3821,7 +3825,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3850,7 +3854,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3906,18 +3910,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared)
+				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3944,11 +3948,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
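
alloc_kmemlist() and do_tune_cpucache() simply forward the flag to every
allocation they make (the per-node kmem_list3, the shared and alien array
caches, and the ccupdate scratch structure), so the whole tuning path
inherits its caller's allocation context, and enable_cpucache() passes the
flag straight through.
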
@@ -3994,7 +3998,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
@@ -4300,7 +4304,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-					       batchcount, shared);
+					       batchcount, shared,
+					       GFP_KERNEL);
 		}
 		break;
 	}
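
The one user-triggered tuning path, slabinfo_write() (a write handler for
/proc/slabinfo), always runs in process context long after boot, so it
passes GFP_KERNEL explicitly.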