author    Linus Torvalds <torvalds@linux-foundation.org>  2015-03-12 21:46:19 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-03-12 21:46:19 -0400
commit    c202baf017aea0c860e53131bc55bb1af7177e76 (patch)
tree      f3b19d008aac14979e115542f4b689e6c99f33cd /mm
parent    ba68bc0115ebfc37f911db4e87bf5f7991f89698 (diff)
parent    7feee590bb18ffc42636975f74c2c3120ce1901c (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  memcg: disable hierarchy support if bound to the legacy cgroup hierarchy
  mm: reorder can_do_mlock to fix audit denial
  kasan, module: move MODULE_ALIGN macro into <linux/moduleloader.h>
  kasan, module, vmalloc: rework shadow allocation for modules
  fanotify: fix event filtering with FAN_ONDIR set
  mm/nommu.c: export symbol max_mapnr
  arch/c6x/include/asm/pgtable.h: define dummy pgprot_writecombine for !MMU
  nilfs2: fix deadlock of segment constructor during recovery
  mm: cma: fix CMA aligned offset calculation
  mm, hugetlb: close race when setting PageTail for gigantic pages
  mm, oom: do not fail __GFP_NOFAIL allocation if oom killer is disabled
  drivers/rtc/rtc-s3c.c: add .needs_src_clk to s3c6410 RTC data
  ocfs2: make append_dio an incompat feature
Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c          12
-rw-r--r--  mm/hugetlb.c       4
-rw-r--r--  mm/kasan/kasan.c  14
-rw-r--r--  mm/memcontrol.c    4
-rw-r--r--  mm/mlock.c         4
-rw-r--r--  mm/nommu.c         1
-rw-r--r--  mm/page_alloc.c    3
-rw-r--r--  mm/vmalloc.c       1
8 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd1de90..68ecb7a42983 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
 	return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
 static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 {
-	unsigned int alignment;
-
 	if (align_order <= cma->order_per_bit)
 		return 0;
-	alignment = 1UL << (align_order - cma->order_per_bit);
-	return ALIGN(cma->base_pfn, alignment) -
-		(cma->base_pfn >> cma->order_per_bit);
+
+	return (ALIGN(cma->base_pfn, (1UL << align_order))
+		- cma->base_pfn) >> cma->order_per_bit;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
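The bug fixed here is a unit mismatch: the old code computed an alignment in bitmap-bit units, applied it to a PFN with ALIGN(), then subtracted a PFN shifted into bitmap units. The fix does the whole alignment in PFN space and only then converts the distance into bitmap bits. A standalone sketch (not kernel code; ALIGN is redefined locally) shows the divergence with small numbers:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long old_offset(unsigned long base_pfn,
				int order_per_bit, int align_order)
{
	unsigned long alignment;

	if (align_order <= order_per_bit)
		return 0;
	/* Bug: 'alignment' is in bitmap-bit units, but it is applied
	 * to a PFN, and the subtraction mixes the two units. */
	alignment = 1UL << (align_order - order_per_bit);
	return ALIGN(base_pfn, alignment) - (base_pfn >> order_per_bit);
}

static unsigned long new_offset(unsigned long base_pfn,
				int order_per_bit, int align_order)
{
	if (align_order <= order_per_bit)
		return 0;
	/* Fix: align in PFN space first, then convert the distance
	 * from PFNs to bitmap bits. */
	return (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
		>> order_per_bit;
}

int main(void)
{
	/* base_pfn = 6, one bitmap bit per 2 pages, 8-page alignment:
	 * the next aligned PFN is 8, i.e. 2 PFNs = 1 bitmap bit away. */
	printf("old: %lu\n", old_offset(6, 1, 3));	/* 5 (wrong) */
	printf("new: %lu\n", new_offset(6, 1, 3));	/* 1 (correct) */
	return 0;
}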
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c26832..c41b2a0ee273 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
 	__SetPageHead(page);
 	__ClearPageReserved(page);
 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-		__SetPageTail(p);
 		/*
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 		p->first_page = page;
+		/* Make sure p->first_page is always valid for PageTail() */
+		smp_wmb();
+		__SetPageTail(p);
 	}
 }
 
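The race being closed: a reader that observes PageTail(p) must also observe a valid p->first_page, so the tail flag has to be published last, behind a write barrier. A userspace C11 analogue of this publish pattern (all names here are illustrative stand-ins, not kernel API):

#include <stdatomic.h>

struct fake_page {
	atomic_int tail;		/* stands in for PageTail() */
	struct fake_page *first_page;	/* payload readers dereference */
};

static void set_tail(struct fake_page *p, struct fake_page *head)
{
	p->first_page = head;		/* 1: initialize the payload */
	/* 2: release ordering plays the role of smp_wmb(), pairing
	 * with the acquire load on the reader side. */
	atomic_store_explicit(&p->tail, 1, memory_order_release);
}

static struct fake_page *get_head(struct fake_page *p)
{
	/* A reader that sees tail == 1 is guaranteed to see the
	 * first_page store that preceded it. */
	if (atomic_load_explicit(&p->tail, memory_order_acquire))
		return p->first_page;
	return p;
}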
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 
 #include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
 			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
 			__builtin_return_address(0));
-	return ret ? 0 : -ENOMEM;
+
+	if (ret) {
+		find_vm_area(addr)->flags |= VM_KASAN;
+		return 0;
+	}
+
+	return -ENOMEM;
 }
 
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
 {
-	vfree(kasan_mem_to_shadow(addr));
+	if (vm->flags & VM_KASAN)
+		vfree(kasan_mem_to_shadow(vm->addr));
 }
 
 static void register_global(struct kasan_global *global)
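The reworked lifetime: kasan_module_alloc() now tags the module's vm area with VM_KASAN so that generic vmalloc teardown (see the remove_vm_area() hunk below) frees the shadow, instead of the module loader calling a dedicated kasan_module_free(). A minimal sketch of this tag-at-alloc, free-at-teardown pattern (simplified stand-in types and flag value, not the kernel's):

#include <stdlib.h>

#define VM_KASAN 0x080	/* stand-in flag bit */

struct area {
	unsigned long flags;
	void *shadow;	/* stands in for kasan_mem_to_shadow(addr) */
};

/* Allocation path: create the shadow and tag the owning area... */
static int alloc_shadow(struct area *a, size_t size)
{
	a->shadow = calloc(1, size);
	if (!a->shadow)
		return -1;
	a->flags |= VM_KASAN;
	return 0;
}

/* ...so one generic teardown path frees it for every tagged area,
 * with no separate "module free" entry point to forget to call. */
static void remove_area(struct area *a)
{
	if (a->flags & VM_KASAN)
		free(a->shadow);
}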
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9fe07692eaad..b34ef4a32a3b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 	 * on for the root memcg is enough.
 	 */
 	if (cgroup_on_dfl(root_css->cgroup))
-		mem_cgroup_from_css(root_css)->use_hierarchy = true;
+		root_mem_cgroup->use_hierarchy = true;
+	else
+		root_mem_cgroup->use_hierarchy = false;
 }
 
 static u64 memory_current_read(struct cgroup_subsys_state *css,
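The reasoning behind the added else-branch: the bind callback can run again when the controller moves back to the legacy hierarchy, so the hierarchy setting must be written on every call rather than only switched on once. A tiny sketch of the assign-on-both-branches pattern (names hypothetical):

#include <stdbool.h>

static bool use_hierarchy;

static void bind(bool on_default_hierarchy)
{
	/* Assign unconditionally: an earlier bind to the default
	 * hierarchy must not leak use_hierarchy == true into a
	 * later bind to the legacy hierarchy. */
	use_hierarchy = on_default_hierarchy;
}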
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf0987088c..8a54cd214925 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
 
 int can_do_mlock(void)
 {
-	if (capable(CAP_IPC_LOCK))
-		return 1;
 	if (rlimit(RLIMIT_MEMLOCK) != 0)
 		return 1;
+	if (capable(CAP_IPC_LOCK))
+		return 1;
 	return 0;
 }
 EXPORT_SYMBOL(can_do_mlock);
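Why the order of the two checks matters: rlimit() is a plain limit read, while a failed capable() check can emit an audit denial record. Testing the silent condition first keeps unprivileged callers that are within their RLIMIT_MEMLOCK out of the audit log. A userspace analogue of the reordered flow (the stderr line stands in for the audit record; all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool within_rlimit(void)
{
	return true;	/* side-effect-free check */
}

static bool has_capability(void)
{
	fprintf(stderr, "audit: capability denied\n"); /* noisy on failure */
	return false;
}

/* Same shape as the reordered can_do_mlock(): the silent check runs
 * first, so the noisy one short-circuits away for the common
 * unprivileged-but-within-limit caller. */
static int can_do(void)
{
	if (within_rlimit())
		return 1;
	if (has_capability())
		return 1;
	return 0;
}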
diff --git a/mm/nommu.c b/mm/nommu.c
index 3e67e7538ecf..3fba2dc97c44 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@ void *high_memory;
 EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
 unsigned long highest_memmap_pfn;
 struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
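What the one-line export buys: loadable modules on nommu kernels can now link against max_mapnr. A minimal hypothetical module that would fail to load without the export:

#include <linux/module.h>
#include <linux/mm.h>

static int __init mapnr_demo_init(void)
{
	/* Resolving this symbol at load time requires the export. */
	pr_info("max_mapnr = %lu\n", max_mapnr);
	return 0;
}

static void __exit mapnr_demo_exit(void)
{
}

module_init(mapnr_demo_init);
module_exit(mapnr_demo_exit);
MODULE_LICENSE("GPL");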
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7abfa70cdc1a..40e29429e7b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
+	    || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
 		*did_some_progress = 1;
 out:
 	oom_zonelist_unlock(ac->zonelist, gfp_mask);
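The contract being preserved: a __GFP_NOFAIL allocation must never fail, and the slowpath retries whenever *did_some_progress is set. With the OOM killer disabled, out_of_memory() cannot report progress, so this hunk forces progress for NOFAIL requests (with a one-time warning) to keep the caller looping instead of returning NULL. A userspace sketch of that retry contract (all names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define FLAG_NOFAIL 0x1

static bool oom_killer_enabled;	/* false: the "disabled" case */

/* Stand-in for __alloc_pages_may_oom(): reports progress if the
 * last-resort path ran, or unconditionally for NOFAIL requests. */
static void may_oom(unsigned flags, bool *did_some_progress)
{
	if (oom_killer_enabled || (flags & FLAG_NOFAIL))
		*did_some_progress = true;
}

static void *alloc_slowpath(unsigned flags, size_t size)
{
	for (;;) {
		void *p = malloc(size);	/* stands in for the freelist */
		bool progress = false;

		if (p)
			return p;
		may_oom(flags, &progress);
		if (progress)
			continue;	/* NOFAIL lands here: keep retrying */
		return NULL;		/* ordinary requests may fail */
	}
}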
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	spin_unlock(&vmap_area_lock);
 
 	vmap_debug_free_range(va->va_start, va->va_end);
+	kasan_free_shadow(vm);
 	free_unmap_vmap_area(va);
 	vm->size -= PAGE_SIZE;
 