Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |  6
-rw-r--r--  mm/backing-dev.c     |  2
-rw-r--r--  mm/kmemleak.c        |  5
-rw-r--r--  mm/ksm.c             | 10
-rw-r--r--  mm/memory.c          | 11
-rw-r--r--  mm/mempolicy.c       | 13
-rw-r--r--  mm/page-writeback.c  |  3
-rw-r--r--  mm/page_alloc.c      |  3
-rw-r--r--  mm/percpu.c          |  5
-rw-r--r--  mm/swapfile.c        | 12
-rw-r--r--  mm/vmalloc.c         | 50
-rw-r--r--  mm/vmscan.c          | 14
12 files changed, 79 insertions, 55 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca173..f791196cee8c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -67,7 +67,7 @@ config DISCONTIGMEM
 
 config SPARSEMEM
 	def_bool y
-	depends on SPARSEMEM_MANUAL
+	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
 
 config FLATMEM
 	def_bool y
@@ -224,7 +224,9 @@ config KSM
 	  the many instances by a single resident page with that content, so
 	  saving memory until one or another app needs to modify the content.
 	  Recommended for use with KVM, or with other duplicative applications.
-	  See Documentation/vm/ksm.txt for more information.
+	  See Documentation/vm/ksm.txt for more information: KSM is inactive
+	  until a program has madvised that an area is MADV_MERGEABLE, and
+	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
 	int "Low address space to protect from user allocation"
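
Illustrative aside, not part of this patch: the expanded help text above describes the two-step activation path (an application madvises the area, then root enables scanning via /sys/kernel/mm/ksm/run). A minimal userspace sketch of opting an anonymous mapping into merging might look like this, assuming CONFIG_KSM=y and headers that expose MADV_MERGEABLE:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* value from asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 64 * 4096;		/* page-aligned, anonymous mapping */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* KSM only scans areas an application has marked mergeable. */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	memset(buf, 0x5a, len);		/* duplicate-heavy content KSM can merge */
	return 0;
}

Merging itself only starts once root writes 1 to /sys/kernel/mm/ksm/run (or, with the ksm.c change below, unconditionally when CONFIG_SYSFS is not set).
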
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3d3accb1f800..5a37e2055717 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -92,7 +92,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh: %8lu kB\n"
 		   "DirtyThresh: %8lu kB\n"
 		   "BackgroundThresh: %8lu kB\n"
-		   "WriteBack threads:%8lu\n"
+		   "WritebackThreads: %8lu\n"
 		   "b_dirty: %8lu\n"
 		   "b_io: %8lu\n"
 		   "b_more_io: %8lu\n"
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 4ea4510e2996..8bf765c4f58d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -833,12 +833,15 @@ static void early_alloc(struct early_log *log)
 	 */
 	rcu_read_lock();
 	object = create_object((unsigned long)log->ptr, log->size,
-			       log->min_count, GFP_KERNEL);
+			       log->min_count, GFP_ATOMIC);
+	if (!object)
+		goto out;
 	spin_lock_irqsave(&object->lock, flags);
 	for (i = 0; i < log->trace_len; i++)
 		object->trace[i] = log->trace[i];
 	object->trace_len = log->trace_len;
 	spin_unlock_irqrestore(&object->lock, flags);
+out:
 	rcu_read_unlock();
 }
 
844 847
diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac356f46..bef1af4f77e3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-	ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
-	ksm_init_max_kernel_pages();
+	ksm_max_kernel_pages = totalram_pages / 4;
 
 	err = ksm_slab_init();
 	if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
 		kthread_stop(ksm_thread);
 		goto out_free2;
 	}
+#else
+	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
 	return 0;
diff --git a/mm/memory.c b/mm/memory.c
index 7a3b0ad5594a..6ab19dd4a199 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -641,6 +641,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end)
 {
+	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
@@ -654,6 +655,8 @@ again:
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+	orig_src_pte = src_pte;
+	orig_dst_pte = dst_pte;
 	arch_enter_lazy_mmu_mode();
 
 	do {
@@ -677,9 +680,9 @@ again:
 
 	arch_leave_lazy_mmu_mode();
 	spin_unlock(src_ptl);
-	pte_unmap_nested(src_pte - 1);
+	pte_unmap_nested(orig_src_pte);
 	add_mm_rss(dst_mm, rss[0], rss[1]);
-	pte_unmap_unlock(dst_pte - 1, dst_ptl);
+	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 	if (addr != end)
 		goto again;
@@ -1820,10 +1823,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	token = pmd_pgtable(*pmd);
 
 	do {
-		err = fn(pte, token, addr, data);
+		err = fn(pte++, token, addr, data);
 		if (err)
 			break;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (addr += PAGE_SIZE, addr != end);
 
 	arch_leave_lazy_mmu_mode();
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7dd9d9f80694..4545d5944243 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1024,7 +1024,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		err = migrate_prep();
 		if (err)
-			return err;
+			goto mpol_out;
 	}
 	{
 		NODEMASK_SCRATCH(scratch);
@@ -1039,10 +1039,9 @@ static long do_mbind(unsigned long start, unsigned long len,
 			err = -ENOMEM;
 		NODEMASK_SCRATCH_FREE(scratch);
 	}
-	if (err) {
-		mpol_put(new);
-		return err;
-	}
+	if (err)
+		goto mpol_out;
+
 	vma = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
@@ -1058,9 +1057,11 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
-	}
+	} else
+		putback_lru_pages(&pagelist);
 
 	up_write(&mm->mmap_sem);
+ mpol_out:
 	mpol_put(new);
 	return err;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3b14090b1fb..2c5d79236ead 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -566,7 +566,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		schedule_timeout_interruptible(pause);
+		__set_current_state(TASK_INTERRUPTIBLE);
+		io_schedule_timeout(pause);
 
 		/*
 		 * Increase the delay for each loop, up to our previous
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bf720550b44d..cdcedf661616 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2183,7 +2183,7 @@ void show_free_areas(void)
 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu"
-		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
+		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
@@ -2196,7 +2196,6 @@ void show_free_areas(void)
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
-		nr_blockdev_pages(),
 		global_page_state(NR_FREE_PAGES),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
diff --git a/mm/percpu.c b/mm/percpu.c
index 4a048abad043..6af78c1ee704 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1870,13 +1870,14 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
 	max_distance = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
-		max_distance = max(max_distance, ai->groups[group].base_offset);
+		max_distance = max_t(size_t, max_distance,
+				     ai->groups[group].base_offset);
 	}
 	max_distance += ai->unit_size;
 
 	/* warn if maximum distance is further than 75% of vmalloc space */
 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
-		pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
+		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
 			   "space 0x%lx\n",
 			   max_distance, VMALLOC_END - VMALLOC_START);
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4de7f02f820b..a1bc6b9af9a2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		goto bad_swap;
 	}
 
-	if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-		p->flags |= SWP_SOLIDSTATE;
-		p->cluster_next = 1 + (random32() % p->highest_bit);
+	if (p->bdev) {
+		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+			p->flags |= SWP_SOLIDSTATE;
+			p->cluster_next = 1 + (random32() % p->highest_bit);
+		}
+		if (discard_swap(p) == 0)
+			p->flags |= SWP_DISCARDABLE;
 	}
-	if (discard_swap(p) == 0)
-		p->flags |= SWP_DISCARDABLE;
 
 	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e663234..0f551a4a44cd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -25,10 +26,10 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1157,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long flags, unsigned long start, unsigned long end,
-		int node, gfp_t gfp_mask, void *caller)
+		unsigned long align, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	static struct vmap_area *va;
 	struct vm_struct *area;
-	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -1201,7 +1201,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1210,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       void *caller)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
 }
 
@@ -1225,22 +1225,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				void *caller)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 				   int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-				  gfp_mask, __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+				  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1403,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
@@ -1417,7 +1418,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
@@ -1476,6 +1477,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  * __vmalloc_node - allocate virtually contiguous memory
  * @size: allocation size
+ * @align: desired alignment
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
  * @node: node to use for allocation or -1
@@ -1485,8 +1487,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1496,8 +1499,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+				  VMALLOC_END, node, gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1516,7 +1519,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1535,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1552,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+	ret = __vmalloc_node(size, SHMLBA,
+			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
@@ -1572,7 +1576,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 			      node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1599,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
 			      -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1620,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1637,7 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64e438898832..777af57fd8c8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -544,6 +544,16 @@ redo:
 		 */
 		lru = LRU_UNEVICTABLE;
 		add_page_to_unevictable_list(page);
+		/*
+		 * When racing with an mlock clearing (page is
+		 * unlocked), make sure that if the other thread does
+		 * not observe our setting of PG_lru and fails
+		 * isolation, we see PG_mlocked cleared below and move
+		 * the page back to the evictable list.
+		 *
+		 * The other side is TestClearPageMlocked().
+		 */
+		smp_mb();
 	}
 
 	/*
@@ -1088,7 +1098,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/* We are about to die and free our memory. Return now. */
 		if (fatal_signal_pending(current))
@@ -1356,7 +1366,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
 			 * so we ignore them here.
 			 */
-			if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
 				list_add(&page->lru, &l_active);
 				continue;
 			}