Diffstat (limited to 'mm')
-rw-r--r--	mm/bootmem.c      | 12
-rw-r--r--	mm/page_cgroup.c  | 12
-rw-r--r--	mm/slab.c         | 85
-rw-r--r--	mm/slub.c         | 17
-rw-r--r--	mm/vmalloc.c      |  3
5 files changed, 76 insertions, 53 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index daf92713f7de..282df0a09e6f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -532,6 +532,9 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
 					unsigned long size, unsigned long align,
 					unsigned long goal, unsigned long limit)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc(size, GFP_NOWAIT);
+
 #ifdef CONFIG_HAVE_ARCH_BOOTMEM
 	bootmem_data_t *p_bdata;
 
@@ -662,6 +665,9 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 }
 
@@ -693,6 +699,9 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
 	void *ptr;
 
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
 	if (ptr)
 		return ptr;
@@ -745,6 +754,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align,
 				     goal, ARCH_LOW_ADDRESS_LIMIT);
 }
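Note: all four bootmem hunks above add the same guard, so a caller that reaches the bootmem API after the slab allocator is up gets redirected to slab with GFP_NOWAIT. A minimal sketch of that pattern follows; slab_is_available(), WARN_ON_ONCE(), kzalloc_node() and GFP_NOWAIT are the real interfaces used in the patch, while example_bootmem_or_slab() and its elided bootmem fallback are illustrative only.

```c
/* Sketch only: the guard pattern the bootmem hunks above introduce. */
static void * __init example_bootmem_or_slab(pg_data_t *pgdat, unsigned long size)
{
	/*
	 * Once the slab allocator is initialised, bootmem may no longer be
	 * used; warn once and satisfy the request from slab instead.
	 * GFP_NOWAIT is used because the caller may not be allowed to sleep.
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return NULL;	/* real code would fall through to the bootmem path */
}
```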
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 791905c991df..3dd4a909a1de 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,6 +47,8 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
+	struct page *page;
+	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -55,11 +57,13 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-
-	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!base)
+	order = get_order(table_size);
+	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
+		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
+	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
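The page_cgroup hunk above replaces the bootmem allocation with the page allocator: get_order() rounds the table size up to a power-of-two number of pages, the pages are requested on the local node first and on any node (nid == -1) as a fallback, and page_address() yields the virtual address used by the rest of the function. A hedged sketch of that sequence, using the same kernel APIs behind an illustrative wrapper name:

```c
/* Sketch: node-local page allocation with an any-node fallback, as above. */
static void * __init example_alloc_table(int nid, unsigned long table_size)
{
	unsigned int order = get_order(table_size);	/* pages needed, rounded up */
	struct page *page;

	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
	if (!page)	/* the node may have no memory of its own */
		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
	if (!page)
		return NULL;

	return page_address(page);	/* kernel virtual address of the zeroed table */
}
```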
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -315,7 +315,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +958,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -1003,7 +1003,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1034,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1042,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
@@ -1282,20 +1282,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1399,10 +1399,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1410,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
 /*
@@ -1575,9 +1573,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1587,11 +1584,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1598,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 				ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1627,7 +1621,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -2064,10 +2058,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2089,7 +2083,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2163,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
@@ -2278,8 +2275,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
@@ -2382,7 +2384,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2394,8 +2396,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 			name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3802,7 +3806,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3812,7 +3816,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3821,7 +3825,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-					0xbaadf00d);
+					0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3850,7 +3854,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3906,18 +3910,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared)
+				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3944,11 +3948,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
@@ -3994,7 +3998,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 			cachep->name, -err);
@@ -4300,7 +4304,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-					       batchcount, shared);
+					       batchcount, shared,
+					       GFP_KERNEL);
 		}
 		break;
 	}
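The slab.c hunks above thread a gfp_t parameter through alloc_arraycache(), alloc_alien_cache(), alloc_kmemlist(), do_tune_cpucache(), enable_cpucache() and setup_cpu_cache(), so early-boot callers can pass GFP_NOWAIT while runtime callers keep GFP_KERNEL; kmem_cache_create() picks the flag with the slab_is_available() branch shown earlier. A sketch of that choice written as a helper (the helper itself is not part of the patch):

```c
/* Sketch: equivalent of the open-coded gfp selection in kmem_cache_create(). */
static gfp_t example_cache_create_gfp(void)
{
	/* Blocking allocations are only legal once the allocator is fully up. */
	return slab_is_available() ? GFP_KERNEL : GFP_NOWAIT;
}
```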
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2557,13 +2557,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 	if (gfp_flags & SLUB_DMA)
 		flags = SLAB_CACHE_DMA;
 
-	down_write(&slub_lock);
+	/*
+	 * This function is called with IRQs disabled during early-boot on
+	 * single CPU so there's no need to take slub_lock here.
+	 */
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
-	up_write(&slub_lock);
+
 	if (sysfs_slab_add(s))
 		goto panic;
 	return s;
@@ -3021,7 +3024,7 @@ void __init kmem_cache_init(void)
 	 * kmem_cache_open for slab_state == DOWN.
 	 */
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_KERNEL);
+		sizeof(struct kmem_cache_node), GFP_NOWAIT);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
 
@@ -3034,16 +3037,16 @@ void __init kmem_cache_init(void)
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[1],
-				"kmalloc-96", 96, GFP_KERNEL);
+				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
 		create_kmalloc_cache(&kmalloc_caches[2],
-				"kmalloc-192", 192, GFP_KERNEL);
+				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
 	}
 
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
-				"kmalloc", 1 << i, GFP_KERNEL);
+				"kmalloc", 1 << i, GFP_NOWAIT);
 		caches++;
 	}
 
@@ -3080,7 +3083,7 @@ void __init kmem_cache_init(void)
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
-			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 083716ea38c9..323513858c20 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,7 +23,6 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
-#include <linux/bootmem.h>
 #include <linux/pfn.h>
 
 #include <asm/atomic.h>
@@ -1032,7 +1031,7 @@ void __init vmalloc_init(void)
 
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = alloc_bootmem(sizeof(struct vmap_area));
+		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
 		va->flags = tmp->flags | VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;