Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig      |  4 ++--
-rw-r--r--  mm/mempolicy.c  |  7 ++++++-
-rw-r--r--  mm/mmap.c       |  5 ++++-
-rw-r--r--  mm/mprotect.c   |  3 ++-
-rw-r--r--  mm/page_alloc.c |  3 ++-
-rw-r--r--  mm/slab.c       |  2 +-
-rw-r--r--  mm/vmscan.c     |  4 ++--
7 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac3529..391ffc54d136 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
 	  If unsure, choose this option (Flat Memory) over any other.
 
 config DISCONTIGMEM_MANUAL
-	bool "Discontigious Memory"
+	bool "Discontiguous Memory"
 	depends on ARCH_DISCONTIGMEM_ENABLE
 	help
 	  This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
 	  memory hotplug systems.  This is normal.
 
 	  For many other systems, this will be an alternative to
-	  "Discontigious Memory".  This option provides some potential
+	  "Discontiguous Memory".  This option provides some potential
 	  performance benefits, along with decreased code complexity,
 	  but it is newer, and more experimental.
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index afa06e184d88..9033f0859aa8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -333,8 +333,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	if (prev && prev->vm_end < vma->vm_start)
 		return ERR_PTR(-EFAULT);
 	if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
+		unsigned long endvma = vma->vm_end;
+		if (endvma > end)
+			endvma = end;
+		if (vma->vm_start > start)
+			start = vma->vm_start;
 		err = check_pgd_range(vma->vm_mm,
-				vma->vm_start, vma->vm_end, nodes);
+				start, endvma, nodes);
 		if (err) {
 			first = ERR_PTR(err);
 			break;
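Review note: the added lines clamp the region handed to check_pgd_range() to the intersection of the caller's [start, end) range with the current VMA; previously the whole VMA was walked even where it fell outside the requested range. A standalone sketch of the same intersection logic (the struct and function names here are hypothetical, not kernel API):

    #include <assert.h>

    struct range { unsigned long start, end; };

    /* Walk only the overlap of the requested [start, end) with one VMA. */
    static struct range clamp_to_vma(unsigned long start, unsigned long end,
                                     unsigned long vm_start, unsigned long vm_end)
    {
            struct range r = { start, end };

            if (r.end > vm_end)        /* same min() as the endvma clamp above */
                    r.end = vm_end;
            if (r.start < vm_start)    /* same max() as the start clamp above */
                    r.start = vm_start;
            return r;
    }

    int main(void)
    {
            /* VMA [0x2000, 0x6000), request [0x1000, 0x4000): walk [0x2000, 0x4000) */
            struct range r = clamp_to_vma(0x1000, 0x4000, 0x2000, 0x6000);

            assert(r.start == 0x2000 && r.end == 0x4000);
            return 0;
    }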
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8ad..fa11d91242e8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 	struct vm_area_struct *vma, struct vm_area_struct *prev,
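Review note: comment-only fix. In this era of the code, unmap_region() is reached from the munmap path with mmap_sem held for writing, and it takes mm->page_table_lock internally rather than expecting it on entry, so the old comment named the wrong lock.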
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
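Review note: insert_vm_struct() now charges VM_ACCOUNT mappings against overcommit accounting via security_vm_enough_memory() before linking the vma, failing with -ENOMEM if the charge is refused. A minimal sketch of that charge-before-link pattern; charge_pages() and insert_mapping() are hypothetical stand-ins, and in the kernel vma_pages() is simply the mapping size in pages:

    #include <errno.h>

    #define PAGE_SHIFT 12

    static long committed_pages;         /* stand-in for the committed-space counter */
    static long commit_limit = 1024;     /* stand-in for the overcommit limit */

    /* Stand-in for security_vm_enough_memory(): 0 and charge on success,
     * nonzero if the charge would exceed the limit. */
    static int charge_pages(long pages)
    {
            if (committed_pages + pages > commit_limit)
                    return -ENOMEM;
            committed_pages += pages;
            return 0;
    }

    /* Mirrors the added check: charge accountable mappings *before* linking,
     * so a failure leaves no half-inserted state to unwind. */
    static int insert_mapping(unsigned long vm_start, unsigned long vm_end,
                              int accountable)
    {
            long pages = (vm_end - vm_start) >> PAGE_SHIFT;  /* like vma_pages() */

            if (accountable && charge_pages(pages))
                    return -ENOMEM;
            /* ... link the mapping into the tree/list here ... */
            return 0;
    }

    int main(void)
    {
            /* A 16 MiB mapping is 4096 pages, over the 1024-page limit: refused. */
            return insert_mapping(0, 16UL << 20, 1) == -ENOMEM ? 0 : 1;
    }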
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 	newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-	if ((newflags & ~(newflags >> 4)) & 0xf) {
+	/* newflags >> 4 shift VM_MAY% in place of VM_% */
+	if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 		error = -EACCES;
 		goto out;
 	}
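Review note: each VM_MAY* bit is the matching VM_* permission bit shifted left by 4 in include/linux/mm.h (VM_READ 0x1 / VM_MAYREAD 0x10, and so on), so newflags >> 4 lines the MAY bits up under the permission bits, and the expression is nonzero exactly when a requested permission lacks its VM_MAY* counterpart. Spelling the mask as VM_READ | VM_WRITE | VM_EXEC instead of the magic 0xf makes that intent explicit. A self-contained demonstration of the check:

    #include <assert.h>

    /* Values as in include/linux/mm.h of this era. */
    #define VM_READ     0x00000001UL
    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYREAD  0x00000010UL
    #define VM_MAYWRITE 0x00000020UL
    #define VM_MAYEXEC  0x00000040UL

    /* Nonzero iff some requested VM_* permission has no VM_MAY* counterpart. */
    static unsigned long forbidden(unsigned long newflags)
    {
            return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
    }

    int main(void)
    {
            /* Write requested on a mapping that may read and write: allowed. */
            assert(!forbidden(VM_WRITE | VM_MAYREAD | VM_MAYWRITE));
            /* Write requested but only VM_MAYREAD is set: rejected (-EACCES path). */
            assert(forbidden(VM_WRITE | VM_MAYREAD));
            return 0;
    }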
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5823c395f71..ae2903339e71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -22,6 +22,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -117,7 +118,7 @@ static void bad_page(const char *function, struct page *page)
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
-	tainted |= TAINT_BAD_PAGE;
+	add_taint(TAINT_BAD_PAGE);
 }
 
 #ifndef CONFIG_HUGETLB_PAGE
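Review note: bad_page() now records the taint through add_taint() instead of OR-ing the tainted word directly; add_taint() is declared in &lt;linux/kernel.h&gt;, which is why the first hunk adds that include. A sketch of the accessor pattern (illustration only, not the kernel's implementation; the flag name and value here are made up):

    #define DEMO_TAINT_BAD_PAGE (1 << 5)

    static unsigned long tainted;         /* stand-in for the shared flag word */

    /* One choke point for all taint updates, instead of open-coded |= at
     * every call site; locking or tracing can later be added here alone. */
    static void add_taint_demo(unsigned int flag)
    {
            tainted |= flag;
    }

    int main(void)
    {
            add_taint_demo(DEMO_TAINT_BAD_PAGE);
            return tainted == DEMO_TAINT_BAD_PAGE ? 0 : 1;
    }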
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad9..437d3388054b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -659,7 +659,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	 * kmem_cache_create(), or __kmalloc(), before
 	 * the generic caches are initialized.
 	 */
-	BUG_ON(csizep->cs_cachep == NULL);
+	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
 	while (size > csizep->cs_size)
 		csizep++;
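Review note: the old assertion tested the loop cursor, which at this point still aims at the first malloc_sizes[] entry; checking malloc_sizes[INDEX_AC].cs_cachep instead appears to test the general cache that kmem_cache_init() brings up first, making it a more direct sentinel for the "called before the generic caches are initialized" case the comment describes.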
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a740778f688d..0ea71e887bb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1258,9 +1258,9 @@ void wakeup_kswapd(struct zone *zone, int order)
 	pgdat->kswapd_max_order = order;
 	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
-	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
 #ifdef CONFIG_PM
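Review note: pgdat already holds zone->zone_pgdat (it is dereferenced two lines earlier for kswapd_max_order), so using it directly in the waitqueue check and wakeup drops redundant pointer chasing with no behavior change.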