Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       4
-rw-r--r--  mm/mempolicy.c   7
-rw-r--r--  mm/mmap.c        5
-rw-r--r--  mm/mprotect.c    3
-rw-r--r--  mm/page_alloc.c  3
-rw-r--r--  mm/slab.c       47
-rw-r--r--  mm/swapfile.c    1
-rw-r--r--  mm/vmscan.c      4
8 files changed, 43 insertions(+), 31 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac3529..391ffc54d136 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
 	  If unsure, choose this option (Flat Memory) over any other.
 
 config DISCONTIGMEM_MANUAL
-	bool "Discontigious Memory"
+	bool "Discontiguous Memory"
 	depends on ARCH_DISCONTIGMEM_ENABLE
 	help
 	  This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
 	  memory hotplug systems. This is normal.
 
 	  For many other systems, this will be an alternative to
-	  "Discontigious Memory". This option provides some potential
+	  "Discontiguous Memory". This option provides some potential
 	  performance benefits, along with decreased code complexity,
 	  but it is newer, and more experimental.
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index afa06e184d88..9033f0859aa8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -333,8 +333,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		if (prev && prev->vm_end < vma->vm_start)
 			return ERR_PTR(-EFAULT);
 		if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
+			unsigned long endvma = vma->vm_end;
+			if (endvma > end)
+				endvma = end;
+			if (vma->vm_start > start)
+				start = vma->vm_start;
 			err = check_pgd_range(vma->vm_mm,
-					vma->vm_start, vma->vm_end, nodes);
+					start, endvma, nodes);
 			if (err) {
 				first = ERR_PTR(err);
 				break;
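
[Annotation, not part of the commit] The added lines clamp the range handed to check_pgd_range() to the overlap of the caller's [start, end) window and the current VMA, instead of always walking the whole VMA. A minimal sketch of that clamping, with clamp_to_vma as a made-up helper name used purely for illustration:

/*
 * Sketch only: restrict the checked span to the intersection of the
 * request window [start, end) and the VMA [vma->vm_start, vma->vm_end).
 */
static void clamp_to_vma(struct vm_area_struct *vma, unsigned long end,
			 unsigned long *start, unsigned long *endvma)
{
	*endvma = vma->vm_end;
	if (*endvma > end)
		*endvma = end;
	if (vma->vm_start > *start)
		*start = vma->vm_start;
}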
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8ad..fa11d91242e8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	    security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-		if ((newflags & ~(newflags >> 4)) & 0xf) {
+		/* newflags >> 4 shift VM_MAY% in place of VM_% */
+		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 			error = -EACCES;
 			goto out;
 		}
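
[Annotation, not part of the commit] The new check works because each VM_MAY* bit sits exactly four bits above its VM_* counterpart, so shifting newflags right by four drops VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC onto VM_READ/VM_WRITE/VM_EXEC. A minimal user-space sketch of the idea (bit values as in <linux/mm.h>; prot_allowed is a hypothetical name used only here):

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_MAYREAD	0x00000010	/* == VM_READ  << 4 */
#define VM_MAYWRITE	0x00000020	/* == VM_WRITE << 4 */
#define VM_MAYEXEC	0x00000040	/* == VM_EXEC  << 4 */

/* Non-zero if every requested VM_* bit is backed by its VM_MAY* bit. */
static int prot_allowed(unsigned long newflags)
{
	return !((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC));
}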
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5823c395f71..ae2903339e71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -22,6 +22,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -117,7 +118,7 @@ static void bad_page(const char *function, struct page *page)
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
-	tainted |= TAINT_BAD_PAGE;
+	add_taint(TAINT_BAD_PAGE);
 }
 
 #ifndef CONFIG_HUGETLB_PAGE
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad9..c9adfce00405 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
 /*
- * This function may be completely optimized away if
+ * This function must be completely optimized away if
  * a constant is passed to it. Mostly the same as
  * what is in linux/slab.h except it returns an
  * index.
  */
-static inline int index_of(const size_t size)
+static __always_inline int index_of(const size_t size)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
 			extern void __bad_size(void);
 			__bad_size();
 		}
-	}
+	} else
+		BUG();
 	return 0;
 }
 
@@ -639,7 +640,7 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -659,7 +660,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
 	 */
-	BUG_ON(csizep->cs_cachep == NULL);
+	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
 	while (size > csizep->cs_size)
 		csizep++;
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
-		free_block(cachep, ac->entry, ac->avail);
+		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
 		spin_unlock(&rl3->list_lock);
 	}
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 		/* Free limit for this kmem_list3 */
 		l3->free_limit -= cachep->batchcount;
 		if (nc)
-			free_block(cachep, nc->entry, nc->avail);
+			free_block(cachep, nc->entry, nc->avail, node);
 
 		if (!cpus_empty(mask)) {
 			spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
 		if (l3->shared) {
 			free_block(cachep, l3->shared->entry,
-				   l3->shared->avail);
+				   l3->shared->avail, node);
 			kfree(l3->shared);
 			l3->shared = NULL;
 		}
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
 {
 	kmem_cache_t *cachep = (kmem_cache_t*)arg;
 	struct array_cache *ac;
+	int node = numa_node_id();
 
 	check_irq_off();
 	ac = ac_data(cachep);
-	spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-	free_block(cachep, ac->entry, ac->avail);
-	spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+	spin_lock(&cachep->nodelists[node]->list_lock);
+	free_block(cachep, ac->entry, ac->avail, node);
+	spin_unlock(&cachep->nodelists[node]->list_lock);
 	ac->avail = 0;
 }
 
@@ -2608,7 +2610,7 @@ done:
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2617,14 +2619,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
 		void *objp = objpp[i];
 		struct slab *slabp;
 		unsigned int objnr;
-		int nodeid = 0;
 
 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
-		nodeid = slabp->nodeid;
-		l3 = cachep->nodelists[nodeid];
+		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
-		check_spinlock_acquired_node(cachep, nodeid);
+		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
 
 
@@ -2664,13 +2664,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
+	int node = numa_node_id();
 
 	batchcount = ac->batchcount;
 #if DEBUG
 	BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
 	check_irq_off();
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2687,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 		}
 	}
 
-	free_block(cachep, ac->entry, batchcount);
+	free_block(cachep, ac->entry, batchcount, node);
 free_done:
 #if STATS
 	{
@@ -2751,7 +2752,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 		} else {
 			spin_lock(&(cachep->nodelists[nodeid])->
 				  list_lock);
-			free_block(cachep, &objp, 1);
+			free_block(cachep, &objp, 1, nodeid);
 			spin_unlock(&(cachep->nodelists[nodeid])->
 				    list_lock);
 		}
@@ -2844,7 +2845,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 	unsigned long save_flags;
 	void *ptr;
 
-	if (nodeid == numa_node_id() || nodeid == -1)
+	if (nodeid == -1)
 		return __cache_alloc(cachep, flags);
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3079,7 +3080,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 
 			if ((nc = cachep->nodelists[node]->shared))
 				free_block(cachep, nc->entry,
-					   nc->avail);
+					   nc->avail, node);
 
 			l3->shared = new;
 			if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3161,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
@@ -3240,7 +3241,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
 		if (tofree > ac->avail) {
 			tofree = (ac->avail+1)/2;
 		}
-		free_block(cachep, ac->entry, tofree);
+		free_block(cachep, ac->entry, tofree, node);
 		ac->avail -= tofree;
 		memmove(ac->entry, &(ac->entry[tofree]),
 			sizeof(void*)*ac->avail);
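
[Annotation, not part of the commit] The common thread in the slab hunks above is that free_block() now takes the NUMA node explicitly instead of re-deriving it per object from slabp->nodeid: each caller picks a node, takes that node's list_lock, and passes the same node down. A condensed sketch of the calling pattern (drain_local_array is a hypothetical name; the types and fields are the ones visible in the diff):

/* Drain a per-CPU array cache back onto the local node's lists. */
static void drain_local_array(kmem_cache_t *cachep, struct array_cache *ac)
{
	int node = numa_node_id();	/* node whose list_lock is held below */

	spin_lock(&cachep->nodelists[node]->list_lock);
	free_block(cachep, ac->entry, ac->avail, node);
	ac->avail = 0;
	spin_unlock(&cachep->nodelists[node]->list_lock);
}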
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0184f510aace..1dcaeda039f4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		error = bd_claim(bdev, sys_swapon);
 		if (error < 0) {
 			bdev = NULL;
+			error = -EINVAL;
 			goto bad_swap;
 		}
 		p->old_block_size = block_size(bdev);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a740778f688d..0ea71e887bb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1258,9 +1258,9 @@ void wakeup_kswapd(struct zone *zone, int order)
 		pgdat->kswapd_max_order = order;
 	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
 		return;
-	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
 #ifdef CONFIG_PM