Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig        19
-rw-r--r--   mm/bootmem.c      12
-rw-r--r--   mm/bounce.c        4
-rw-r--r--   mm/mmap.c          8
-rw-r--r--   mm/mprotect.c      2
-rw-r--r--   mm/nommu.c         3
-rw-r--r--   mm/page_cgroup.c  12
-rw-r--r--   mm/shmem.c         2
-rw-r--r--   mm/slab.c         85
-rw-r--r--   mm/slub.c         17
-rw-r--r--   mm/vmalloc.c       3
11 files changed, 111 insertions, 56 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c2b57d81e153..71830ba7b986 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -226,6 +226,25 @@ config HAVE_MLOCKED_PAGE_BIT
 config MMU_NOTIFIER
 	bool
 
+config DEFAULT_MMAP_MIN_ADDR
+	int "Low address space to protect from user allocation"
+	default 4096
+	help
+	  This is the portion of low virtual memory which should be protected
+	  from userspace allocation.  Keeping a user from writing to low pages
+	  can help reduce the impact of kernel NULL pointer bugs.
+
+	  For most ia64, ppc64 and x86 users with lots of address space
+	  a value of 65536 is reasonable and should cause no problems.
+	  On arm and other archs it should not be higher than 32768.
+	  Programs which use vm86 functionality would either need additional
+	  permissions from either the LSM or the capabilities module or have
+	  this protection disabled.
+
+	  This value can be changed after boot using the
+	  /proc/sys/vm/mmap_min_addr tunable.
+
+
 config NOMMU_INITIAL_TRIM_EXCESS
 	int "Turn on mmap() excess space trimming before booting"
 	depends on !MMU
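
As the help text notes, the compiled-in default is also runtime-tunable via /proc/sys/vm/mmap_min_addr. A minimal userspace check of the current setting, for illustration only (plain libc, not part of the patch):

/* Illustrative only: print the current mmap_min_addr setting. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long min_addr;
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

	if (!f) {
		perror("mmap_min_addr");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%lu", &min_addr) != 1) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("mmap_min_addr = %lu\n", min_addr);
	return EXIT_SUCCESS;
}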
diff --git a/mm/bootmem.c b/mm/bootmem.c
index daf92713f7de..282df0a09e6f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -532,6 +532,9 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
 					unsigned long size, unsigned long align,
 					unsigned long goal, unsigned long limit)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc(size, GFP_NOWAIT);
+
 #ifdef CONFIG_HAVE_ARCH_BOOTMEM
 	bootmem_data_t *p_bdata;
 
@@ -662,6 +665,9 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 				   unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
 }
 
@@ -693,6 +699,9 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
 	void *ptr;
 
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
 	if (ptr)
 		return ptr;
@@ -745,6 +754,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
 				       unsigned long align, unsigned long goal)
 {
+	if (WARN_ON_ONCE(slab_is_available()))
+		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
 	return ___alloc_bootmem_node(pgdat->bdata, size, align,
 				     goal, ARCH_LOW_ADDRESS_LIMIT);
 }
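
The four hunks above share one guard: once slab_is_available(), bootmem requests are redirected to kzalloc()/kzalloc_node() with GFP_NOWAIT instead of touching bootmem. A rough userspace analogue of that guard pattern, with all names made up for illustration (the kernel uses slab_is_available(), kzalloc() and the real bootmem allocator):

/* Illustrative analogue of the "fall back to the late allocator" guard. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool late_allocator_up;          /* stands in for slab_is_available() */
static unsigned char early_pool[4096];  /* stands in for bootmem */
static size_t early_used;

static void *boot_or_late_alloc(size_t size)
{
	if (late_allocator_up)          /* once the real allocator exists... */
		return calloc(1, size); /* ...use it (cf. kzalloc + GFP_NOWAIT) */

	if (early_used + size > sizeof(early_pool))
		return NULL;
	memset(early_pool + early_used, 0, size);
	early_used += size;
	return early_pool + early_used - size;
}

int main(void)
{
	void *a = boot_or_late_alloc(64);  /* served from the early pool */
	late_allocator_up = true;
	void *b = boot_or_late_alloc(64);  /* served by calloc() */

	printf("early=%p late=%p\n", a, b);
	free(b);
	return 0;
}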
diff --git a/mm/bounce.c b/mm/bounce.c
index 65f5e17e411a..4ebe3ea83795 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -191,7 +191,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if (page_to_pfn(page) <= q->bounce_pfn)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		/*
@@ -283,7 +283,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->bounce_pfn >= blk_max_pfn)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -28,6 +28,7 @@
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
+#include <linux/perf_counter.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -87,6 +88,9 @@ int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 struct percpu_counter vm_committed_as;
 
+/* amount of vm to protect from userspace access */
+unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+
 /*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
@@ -1219,6 +1223,8 @@ munmap_back:
 	if (correct_wcount)
 		atomic_inc(&inode->i_writecount);
 out:
+	perf_counter_mmap(vma);
+
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
@@ -2305,6 +2311,8 @@ int install_special_mapping(struct mm_struct *mm,
 
 	mm->total_vm += len >> PAGE_SHIFT;
 
+	perf_counter_mmap(vma);
+
 	return 0;
 }
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 258197b76fb4..d80311baeb2d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,6 +23,7 @@
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
+#include <linux/perf_counter.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -299,6 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
 		if (error)
 			goto out;
+		perf_counter_mmap(vma);
 		nstart = tmp;
 
 		if (nstart < prev->vm_end)
diff --git a/mm/nommu.c b/mm/nommu.c
index b571ef707428..2fd2ad5da98e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,6 +69,9 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 int heap_stack_gap = 0;
 
+/* amount of vm to protect from userspace access */
+unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+
 atomic_long_t mmap_pages_allocated;
 
 EXPORT_SYMBOL(mem_map);
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 791905c991df..3dd4a909a1de 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,6 +47,8 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
+	struct page *page;
+	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -55,11 +57,13 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-
-	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!base)
+	order = get_order(table_size);
+	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
+		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
+	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
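
The replacement above sizes the allocation with get_order(table_size), i.e. the smallest order such that PAGE_SIZE << order covers the table. A small userspace sketch of that calculation, assuming 4 KiB pages and an illustrative per-entry size (not the real sizeof(struct page_cgroup)):

/* Userspace sketch of the get_order() calculation; values are examples. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	/* smallest order whose page run covers `size` */
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* e.g. a hypothetical 40-byte entry for 100000 spanned pages */
	unsigned long table_size = 40UL * 100000;

	printf("table_size=%lu -> order %u\n", table_size, order_for(table_size));
	return 0;
}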
diff --git a/mm/shmem.c b/mm/shmem.c
index b25f95ce3db7..0132fbd45a23 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2659,6 +2659,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	if (error)
 		goto close_file;
 #endif
+	ima_counts_get(file);
 	return file;
 
 close_file:
@@ -2684,7 +2685,6 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	ima_shm_check(file);
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	vma->vm_file = file;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -316,7 +316,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -959,12 +959,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
 	/*
 	 * The array_cache structures contain pointers to free object.
 	 * However, when such objects are allocated or transfered to another
@@ -1012,7 +1012,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1043,7 +1043,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
@@ -1051,14 +1051,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
@@ -1291,20 +1291,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1408,10 +1408,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1420,7 +1419,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
 /*
@@ -1584,9 +1582,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1596,11 +1593,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1612,7 +1607,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1636,7 +1630,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -2073,10 +2067,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2098,7 +2092,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2162,6 +2156,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2177,8 +2172,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
@@ -2287,8 +2284,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
@@ -2391,7 +2393,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2403,8 +2405,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3830,7 +3834,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3840,7 +3844,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3849,7 +3853,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-					0xbaadf00d);
+					0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3878,7 +3882,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3934,18 +3938,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared)
+				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3972,11 +3976,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
@@ -4022,7 +4026,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
@@ -4328,7 +4332,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-						batchcount, shared);
+						batchcount, shared,
+						GFP_KERNEL);
 		}
 		break;
 	}
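
The limit/batchcount/shared triple passed to do_tune_cpucache() in this last hunk is the one accepted through writes to /proc/slabinfo, which is how the GFP_KERNEL path is reached at runtime. A hedged userspace sketch of such a write; the cache name and numbers are placeholders, and it requires root on a CONFIG_SLAB kernel:

/* Illustrative only: tune a SLAB cache through /proc/slabinfo.
 * Cache name and values below are placeholders. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("/proc/slabinfo");
		return 1;
	}
	/* format: <cache name> <limit> <batchcount> <shared> */
	fprintf(f, "dentry 120 60 8\n");
	fclose(f);
	return 0;
}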
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2560,13 +2560,16 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 	if (gfp_flags & SLUB_DMA)
 		flags = SLAB_CACHE_DMA;
 
-	down_write(&slub_lock);
+	/*
+	 * This function is called with IRQs disabled during early-boot on
+	 * single CPU so there's no need to take slub_lock here.
+	 */
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
-	up_write(&slub_lock);
+
 	if (sysfs_slab_add(s))
 		goto panic;
 	return s;
@@ -3024,7 +3027,7 @@ void __init kmem_cache_init(void)
 	 * kmem_cache_open for slab_state == DOWN.
 	 */
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_KERNEL);
+		sizeof(struct kmem_cache_node), GFP_NOWAIT);
 	kmalloc_caches[0].refcount = -1;
 	caches++;
 
@@ -3037,16 +3040,16 @@ void __init kmem_cache_init(void)
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 64) {
 		create_kmalloc_cache(&kmalloc_caches[1],
-				"kmalloc-96", 96, GFP_KERNEL);
+				"kmalloc-96", 96, GFP_NOWAIT);
 		caches++;
 		create_kmalloc_cache(&kmalloc_caches[2],
-				"kmalloc-192", 192, GFP_KERNEL);
+				"kmalloc-192", 192, GFP_NOWAIT);
 		caches++;
 	}
 
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
-			"kmalloc", 1 << i, GFP_KERNEL);
+			"kmalloc", 1 << i, GFP_NOWAIT);
 		caches++;
 	}
 
@@ -3083,7 +3086,7 @@ void __init kmem_cache_init(void)
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
-			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b7db93572797..f8189a4b3e13 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,7 +23,6 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
-#include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
 
@@ -1033,7 +1032,7 @@ void __init vmalloc_init(void)
 
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = alloc_bootmem(sizeof(struct vmap_area));
+		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
 		va->flags = tmp->flags | VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;