Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig     4
-rw-r--r--  mm/mmap.c      5
-rw-r--r--  mm/mprotect.c  3
-rw-r--r--  mm/slab.c      2
4 files changed, 9 insertions, 5 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac3529..391ffc54d136 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
 	  If unsure, choose this option (Flat Memory) over any other.
 
 config DISCONTIGMEM_MANUAL
-	bool "Discontigious Memory"
+	bool "Discontiguous Memory"
 	depends on ARCH_DISCONTIGMEM_ENABLE
 	help
 	  This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
 	  memory hotplug systems. This is normal.
 
 	  For many other systems, this will be an alternative to
-	  "Discontigious Memory". This option provides some potential
+	  "Discontiguous Memory". This option provides some potential
 	  performance benefits, along with decreased code complexity,
 	  but it is newer, and more experimental.
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8ad..fa11d91242e8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
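The hunk above makes insert_vm_struct() charge accounted mappings against the commit limit before linking the VMA. Below is a minimal userspace model (not kernel code) of that step: only vma_pages() and the flag test mirror the kernel logic, while the limit, the counter, and vm_enough_memory() are invented stand-ins for the demo.

/* Userspace sketch of the new accounting check in insert_vm_struct(). */
#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT	12
#define VM_ACCOUNT	0x00100000UL	/* accounted mapping */

struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
};

/* Length of the mapping in pages, as the kernel's vma_pages() computes it. */
static unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static unsigned long committed;				/* demo counter */
static const unsigned long commit_limit = 1UL << 18;	/* demo limit  */

/* Stand-in for security_vm_enough_memory(): 0 if the charge fits. */
static int vm_enough_memory(unsigned long pages)
{
	if (committed + pages > commit_limit)
		return -ENOMEM;
	committed += pages;
	return 0;
}

int main(void)
{
	struct vm_area_struct vma = {
		.vm_start = 0x40000000UL,
		.vm_end   = 0x40100000UL,	/* 1 MiB => 256 pages */
		.vm_flags = VM_ACCOUNT,
	};

	/* Same shape as the lines added by this hunk. */
	if ((vma.vm_flags & VM_ACCOUNT) && vm_enough_memory(vma_pages(&vma))) {
		fprintf(stderr, "insert would fail with -ENOMEM\n");
		return 1;
	}
	printf("charged %lu pages, %lu committed\n", vma_pages(&vma), committed);
	return 0;
}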
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 	newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-	if ((newflags & ~(newflags >> 4)) & 0xf) {
+	/* newflags >> 4 shift VM_MAY% in place of VM_% */
+	if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 		error = -EACCES;
 		goto out;
 	}
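The comment added by this hunk names the bit trick the old 0xf magic number relied on: each VM_MAY* flag is its VM_* counterpart shifted left by 4, so newflags >> 4 lines the VM_MAY* bits up with VM_READ/VM_WRITE/VM_EXEC, and any requested permission without its VM_MAY* twin survives the mask. A standalone demo (not kernel code), using the historical flag values from include/linux/mm.h:

/* Demo of the mprotect permission check above. */
#include <stdio.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYREAD	0x00000010UL	/* VM_READ  << 4 */
#define VM_MAYWRITE	0x00000020UL	/* VM_WRITE << 4 */
#define VM_MAYEXEC	0x00000040UL	/* VM_EXEC  << 4 */

/* Nonzero iff some requested permission lacks its VM_MAY* counterpart. */
static unsigned long forbidden(unsigned long newflags)
{
	return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
}

int main(void)
{
	/* write requested and allowed: prints 0 */
	printf("%#lx\n", forbidden(VM_WRITE | VM_MAYWRITE));
	/* exec requested but only VM_MAYREAD present: prints 0x4 (VM_EXEC) */
	printf("%#lx\n", forbidden(VM_EXEC | VM_MAYREAD));
	return 0;
}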
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad9..437d3388054b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -659,7 +659,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	 *  kmem_cache_create(), or __kmalloc(), before
 	 *  the generic caches are initialized.
 	 */
-	BUG_ON(csizep->cs_cachep == NULL);
+	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
 	while (size > csizep->cs_size)
 		csizep++;
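For context, the loop in these lines is the size-class walk at the heart of __find_general_cachep(): scan malloc_sizes[] until the first cache whose cs_size covers the request. A sketch (not kernel code) with an illustrative table, not the kernel's actual malloc_sizes[]:

/* Sketch of the general-cache size-class lookup. */
#include <stdio.h>
#include <stddef.h>

struct cache_sizes {
	size_t		cs_size;
	const char	*cs_cachep;	/* stands in for kmem_cache_t * */
};

static struct cache_sizes malloc_sizes[] = {
	{ 32,	"size-32"  },
	{ 64,	"size-64"  },
	{ 128,	"size-128" },
	{ 256,	"size-256" },
	{ (size_t)-1, NULL },		/* sentinel: matches any size */
};

int main(void)
{
	size_t size = 100;
	struct cache_sizes *csizep = malloc_sizes;

	/* Same loop shape as the context lines above. */
	while (size > csizep->cs_size)
		csizep++;
	printf("kmalloc(%zu) would use %s\n", size, csizep->cs_cachep);
	return 0;
}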