Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig        |   3
-rw-r--r--  mm/Makefile       |   1
-rw-r--r--  mm/filemap.c      |  16
-rw-r--r--  mm/filemap_xip.c  |   3
-rw-r--r--  mm/fremap.c       |   3
-rw-r--r--  mm/hugetlb.c      |   4
-rw-r--r--  mm/madvise.c      |   4
-rw-r--r--  mm/memcontrol.c   |   3
-rw-r--r--  mm/memory.c       |  60
-rw-r--r--  mm/mmap.c         | 160
-rw-r--r--  mm/mmu_notifier.c | 277
-rw-r--r--  mm/mprotect.c     |   3
-rw-r--r--  mm/mremap.c       |   6
-rw-r--r--  mm/page_alloc.c   |  17
-rw-r--r--  mm/rmap.c         |  16
-rw-r--r--  mm/shmem.c        |   3
-rw-r--r--  mm/swap.c         |   7
-rw-r--r--  mm/swapfile.c     |   4
-rw-r--r--  mm/vmscan.c       |   4
19 files changed, 547 insertions, 47 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index efee5d379df..446c6588c75 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -208,3 +208,6 @@ config NR_QUICK
 config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
+
+config MMU_NOTIFIER
+	bool
diff --git a/mm/Makefile b/mm/Makefile
index 06ca2381fef..da4ccf015ae 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SHMEM) += shmem.o
 obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
 obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
 obj-$(CONFIG_SLOB) += slob.o
+obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
diff --git a/mm/filemap.c b/mm/filemap.c
index 5de7633e1db..d97d1ad5547 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1023,8 +1023,17 @@ find_page:
 					ra, filp, page,
 					index, last_index - index);
 		}
-		if (!PageUptodate(page))
-			goto page_not_up_to_date;
+		if (!PageUptodate(page)) {
+			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+					!mapping->a_ops->is_partially_uptodate)
+				goto page_not_up_to_date;
+			if (TestSetPageLocked(page))
+				goto page_not_up_to_date;
+			if (!mapping->a_ops->is_partially_uptodate(page,
+								desc, offset))
+				goto page_not_up_to_date_locked;
+			unlock_page(page);
+		}
 page_ok:
 	/*
 	 * i_size must be checked after we know the page is Uptodate.
@@ -1094,6 +1103,7 @@ page_not_up_to_date:
 	if (lock_page_killable(page))
 		goto readpage_eio;
 
+page_not_up_to_date_locked:
 	/* Did it get truncated before we got the lock? */
 	if (!page->mapping) {
 		unlock_page(page);
@@ -1869,7 +1879,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
 	 * The !iov->iov_len check ensures we skip over unlikely
 	 * zero-length segments (without overruning the iovec).
 	 */
-	while (bytes || unlikely(!iov->iov_len && i->count)) {
+	while (bytes || unlikely(i->count && !iov->iov_len)) {
 		int copy;
 
 		copy = min(bytes, iov->iov_len - base);
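
The filemap.c hunks above add an optional is_partially_uptodate address_space operation so that, on filesystems whose block size is smaller than the page size, a read can be satisfied from already-uptodate buffers without waiting for the whole page to become Uptodate. The hook implementations live outside this mm-limited diffstat; as a hedged sketch (assuming the block_is_partially_uptodate() helper from the companion fs/ changes, with example_readpage standing in for a real readpage), a filesystem would wire it up roughly like this:

/* Sketch only: block_is_partially_uptodate() is assumed to come from
 * the companion fs/buffer.c change; example_readpage is a made-up name. */
static int example_readpage(struct file *file, struct page *page);

static const struct address_space_operations example_aops = {
	.readpage		= example_readpage,
	.is_partially_uptodate	= block_is_partially_uptodate,
};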
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 98a3f31ccd6..380ab402d71 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/uio.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
@@ -188,7 +189,7 @@ __xip_unmap (struct address_space * mapping,
 		if (pte) {
 			/* Nuke the page table entry. */
 			flush_cache_page(vma, address, pte_pfn(*pte));
-			pteval = ptep_clear_flush(vma, address, pte);
+			pteval = ptep_clear_flush_notify(vma, address, pte);
 			page_remove_rmap(page, vma);
 			dec_mm_counter(mm, file_rss);
 			BUG_ON(pte_dirty(pteval));
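
The ptep_clear_flush_notify() call here, and the matching substitutions later in memory.c and rmap.c, cover the places where a single pte is torn down outside an invalidate_range_start()/invalidate_range_end() pair. The wrapper itself is defined in include/linux/mmu_notifier.h, outside this mm-limited diffstat; roughly (shown only for context, and simplified — the real macro also guards against multiple evaluation of its arguments), it clears and flushes the pte and then notifies registered listeners:

/* Rough shape of the helper assumed to exist in include/linux/mmu_notifier.h. */
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	pte_t __pte;							\
	__pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_page((__vma)->vm_mm, __address);	\
	__pte;								\
})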
diff --git a/mm/fremap.c b/mm/fremap.c
index 07a9c82ce1a..7881638e4a1 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -15,6 +15,7 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -214,7 +215,9 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 		spin_unlock(&mapping->i_mmap_lock);
 	}
 
+	mmu_notifier_invalidate_range_start(mm, start, start + size);
 	err = populate_range(mm, vma, start, size, pgoff);
+	mmu_notifier_invalidate_range_end(mm, start, start + size);
 	if (!err && !(flags & MAP_NONBLOCK)) {
 		if (unlikely(has_write_lock)) {
 			downgrade_write(&mm->mmap_sem);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b3c78640b62..d237a02eb22 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
@@ -19,6 +20,7 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
@@ -1672,6 +1674,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(start & ~huge_page_mask(h));
 	BUG_ON(end & ~huge_page_mask(h));
 
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
@@ -1713,6 +1716,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	}
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		list_del(&page->lru);
 		put_page(page);
diff --git a/mm/madvise.c b/mm/madvise.c
index 23a0ec3e0ea..f9349c18a1b 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -132,10 +132,10 @@ static long madvise_willneed(struct vm_area_struct * vma,
  * Application no longer needs these pages.  If the pages are dirty,
  * it's OK to just throw them away.  The app will be more careful about
  * data it wants to keep.  Be sure to free swap resources too.  The
- * zap_page_range call sets things up for refill_inactive to actually free
+ * zap_page_range call sets things up for shrink_active_list to actually free
  * these pages later if no one else has touched them in the meantime,
  * although we could add these pages to a global reuse list for
- * refill_inactive to pick up before reclaiming other pages.
+ * shrink_active_list to pick up before reclaiming other pages.
  *
  * NB: This interface discards data rather than pushes it out to swap,
  * as some implementations do.  This has performance implications for
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fba566c5132..7056c3bdb47 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1168,9 +1168,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	mem = mem_cgroup_from_cont(cont);
 	old_mem = mem_cgroup_from_cont(old_cont);
 
-	if (mem == old_mem)
-		goto out;
-
 	/*
 	 * Only thread group leaders are allowed to migrate, the mm_struct is
 	 * in effect owned by the leader
diff --git a/mm/memory.c b/mm/memory.c
index a8ca04faaea..0e4eea10c7b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -652,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long next;
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
+	int ret;
 
 	/*
 	 * Don't copy ptes where a page fault will fill them correctly.
@@ -667,17 +669,33 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	/*
+	 * We need to invalidate the secondary MMU mappings only when
+	 * there could be a permission downgrade on the ptes of the
+	 * parent mm. And a permission downgrade will only happen if
+	 * is_cow_mapping() returns true.
+	 */
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+	ret = 0;
 	dst_pgd = pgd_offset(dst_mm, addr);
 	src_pgd = pgd_offset(src_mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-						vma, addr, next))
-			return -ENOMEM;
+		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+					    vma, addr, next))) {
+			ret = -ENOMEM;
+			break;
+		}
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
-	return 0;
+
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_end(src_mm,
+						  vma->vm_start, end);
+	return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -881,7 +899,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = (*tlbp)->fullmm;
+	struct mm_struct *mm = vma->vm_mm;
 
+	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
 		unsigned long end;
 
@@ -946,6 +966,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		}
 	}
 out:
+	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 	return start;	/* which is now the end (or restart) address */
 }
 
@@ -972,6 +993,30 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(tlb, address, end);
 	return end;
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
+
+/**
+ * zap_vma_ptes - remove ptes mapping the vma
+ * @vma: vm_area_struct holding ptes to be zapped
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ *
+ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
+ *
+ * The entire address range must be fully contained within the vma.
+ *
+ * Returns 0 if successful.
+ */
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size)
+{
+	if (address < vma->vm_start || address + size > vma->vm_end ||
+			!(vma->vm_flags & VM_PFNMAP))
+		return -1;
+	zap_page_range(vma, address, size, NULL);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
 /*
  * Do a quick page-table lookup for a single page.
@@ -1066,6 +1111,7 @@ no_page_table:
 	}
 	return page;
 }
+EXPORT_SYMBOL_GPL(follow_page);
 
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
@@ -1616,10 +1662,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long end = addr + size;
+	unsigned long start = addr, end = addr + size;
 	int err;
 
 	BUG_ON(addr >= end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1627,6 +1674,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -1839,7 +1887,7 @@ gotten:
 	 * seen in the presence of one thread doing SMC and another
 	 * thread doing COW.
 	 */
-	ptep_clear_flush(vma, address, page_table);
+	ptep_clear_flush_notify(vma, address, page_table);
 	set_pte_at(mm, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 	lru_cache_add_active(new_page);
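
The new zap_vma_ptes() export above gives drivers that set up VM_PFNMAP mappings a supported way to tear down the ptes for a range of their vma instead of open-coding zap_page_range(). A minimal, hypothetical caller (all mydev_* names are made up; the vma is assumed to be owned by the driver, with mmap_sem held as the zap path requires) might look like:

/* Sketch: drop the ptes backing a driver-owned VM_PFNMAP window. */
static void mydev_unmap_user_window(struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (zap_vma_ptes(vma, vma->vm_start, len))
		/* range not fully inside a VM_PFNMAP vma */
		printk(KERN_WARNING "mydev: zap_vma_ptes failed\n");
}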
diff --git a/mm/mmap.c b/mm/mmap.c
index 5e0cc99e9cd..245c3d69067 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -26,6 +26,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2061,6 +2062,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	/* mm's last user has gone, and its about to be pulled down */
 	arch_exit_mmap(mm);
+	mmu_notifier_release(mm);
 
 	lru_add_drain();
 	flush_cache_mm(mm);
@@ -2268,3 +2270,161 @@ int install_special_mapping(struct mm_struct *mm,
 
 	return 0;
 }
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+{
+	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+		/*
+		 * The LSB of head.next can't change from under us
+		 * because we hold the mm_all_locks_mutex.
+		 */
+		spin_lock(&anon_vma->lock);
+		/*
+		 * We can safely modify head.next after taking the
+		 * anon_vma->lock. If some other vma in this mm shares
+		 * the same anon_vma we won't take it again.
+		 *
+		 * No need of atomic instructions here, head.next
+		 * can't change from under us thanks to the
+		 * anon_vma->lock.
+		 */
+		if (__test_and_set_bit(0, (unsigned long *)
+				       &anon_vma->head.next))
+			BUG();
+	}
+}
+
+static void vm_lock_mapping(struct address_space *mapping)
+{
+	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+		/*
+		 * AS_MM_ALL_LOCKS can't change from under us because
+		 * we hold the mm_all_locks_mutex.
+		 *
+		 * Operations on ->flags have to be atomic because
+		 * even if AS_MM_ALL_LOCKS is stable thanks to the
+		 * mm_all_locks_mutex, there may be other cpus
+		 * changing other bitflags in parallel to us.
+		 */
+		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+			BUG();
+		spin_lock(&mapping->i_mmap_lock);
+	}
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_sem in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_sem until mm_drop_all_locks() returns.
+ *
+ * mmap_sem in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+ * altering the vma layout (for example populate_range() with
+ * nonlinear vmas). It's also needed in write mode to avoid new
+ * anon_vmas to be associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags avoid to take the same lock twice, if more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We can take all the locks in random order because the VM code
+ * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * takes more than one of them in a row. Secondly we're protected
+ * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks are expensive operations
+ * that may have to take thousand of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	int ret = -EINTR;
+
+	BUG_ON(down_read_trylock(&mm->mmap_sem));
+
+	mutex_lock(&mm_all_locks_mutex);
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->anon_vma)
+			vm_lock_anon_vma(vma->anon_vma);
+		if (vma->vm_file && vma->vm_file->f_mapping)
+			vm_lock_mapping(vma->vm_file->f_mapping);
+	}
+	ret = 0;
+
+out_unlock:
+	if (ret)
+		mm_drop_all_locks(mm);
+
+	return ret;
+}
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+	if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+		/*
+		 * The LSB of head.next can't change to 0 from under
+		 * us because we hold the mm_all_locks_mutex.
+		 *
+		 * We must however clear the bitflag before unlocking
+		 * the vma so the users using the anon_vma->head will
+		 * never see our bitflag.
+		 *
+		 * No need of atomic instructions here, head.next
+		 * can't change from under us until we release the
+		 * anon_vma->lock.
+		 */
+		if (!__test_and_clear_bit(0, (unsigned long *)
+					  &anon_vma->head.next))
+			BUG();
+		spin_unlock(&anon_vma->lock);
+	}
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+		/*
+		 * AS_MM_ALL_LOCKS can't change to 0 from under us
+		 * because we hold the mm_all_locks_mutex.
+		 */
+		spin_unlock(&mapping->i_mmap_lock);
+		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+					&mapping->flags))
+			BUG();
+	}
+}
+
+/*
+ * The mmap_sem cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	BUG_ON(down_read_trylock(&mm->mmap_sem));
+	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma->anon_vma)
+			vm_unlock_anon_vma(vma->anon_vma);
+		if (vma->vm_file && vma->vm_file->f_mapping)
+			vm_unlock_mapping(vma->vm_file->f_mapping);
+	}
+
+	mutex_unlock(&mm_all_locks_mutex);
+}
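
The comments above spell out the calling convention for mm_take_all_locks()/mm_drop_all_locks(); a hedged sketch of that pattern follows (the helper name is made up — the real in-tree caller is do_mmu_notifier_register() in the new mm/mmu_notifier.c below):

/* Sketch of the documented convention: mmap_sem held for writing
 * across the whole take/drop window; -EINTR if a signal arrives. */
static int example_update_under_all_locks(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (!ret) {
		/* ... modify notifier state while faults, truncate and
		 * rmap walks on this mm are excluded ... */
		mm_drop_all_locks(mm);
	}
	up_write(&mm->mmap_sem);
	return ret;
}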
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
new file mode 100644
index 00000000000..5f4ef0250be
--- /dev/null
+++ b/mm/mmu_notifier.c
@@ -0,0 +1,277 @@
+/*
+ *  linux/mm/mmu_notifier.c
+ *
+ *  Copyright (C) 2008  Qumranet, Inc.
+ *  Copyright (C) 2008  SGI
+ *             Christoph Lameter <clameter@sgi.com>
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/rculist.h>
+#include <linux/mmu_notifier.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+/*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+ * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+ * the mmu_notifier_mm->lock in addition to RCU and it serializes
+ * against the other mmu notifiers with RCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+void __mmu_notifier_release(struct mm_struct *mm)
+{
+	struct mmu_notifier *mn;
+
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+				 struct mmu_notifier,
+				 hlist);
+		/*
+		 * We arrived before mmu_notifier_unregister so
+		 * mmu_notifier_unregister will do nothing other than
+		 * to wait ->release to finish and
+		 * mmu_notifier_unregister to return.
+		 */
+		hlist_del_init_rcu(&mn->hlist);
+		/*
+		 * RCU here will block mmu_notifier_unregister until
+		 * ->release returns.
+		 */
+		rcu_read_lock();
+		spin_unlock(&mm->mmu_notifier_mm->lock);
+		/*
+		 * if ->release runs before mmu_notifier_unregister it
+		 * must be handled as it's the only way for the driver
+		 * to flush all existing sptes and stop the driver
+		 * from establishing any more sptes before all the
+		 * pages in the mm are freed.
+		 */
+		if (mn->ops->release)
+			mn->ops->release(mn, mm);
+		rcu_read_unlock();
+		spin_lock(&mm->mmu_notifier_mm->lock);
+	}
+	spin_unlock(&mm->mmu_notifier_mm->lock);
+
+	/*
+	 * synchronize_rcu here prevents mmu_notifier_release to
+	 * return to exit_mmap (which would proceed freeing all pages
+	 * in the mm) until the ->release method returns, if it was
+	 * invoked by mmu_notifier_unregister.
+	 *
+	 * The mmu_notifier_mm can't go away from under us because one
+	 * mm_count is hold by exit_mmap.
+	 */
+	synchronize_rcu();
+}
+
+/*
+ * If no young bitflag is supported by the hardware, ->clear_flush_young can
+ * unmap the address and return 1 or 0 depending if the mapping previously
+ * existed or not.
+ */
+int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+					unsigned long address)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+	int young = 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->clear_flush_young)
+			young |= mn->ops->clear_flush_young(mn, mm, address);
+	}
+	rcu_read_unlock();
+
+	return young;
+}
+
+void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+					unsigned long address)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->invalidate_page)
+			mn->ops->invalidate_page(mn, mm, address);
+	}
+	rcu_read_unlock();
+}
+
+void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->invalidate_range_start)
+			mn->ops->invalidate_range_start(mn, mm, start, end);
+	}
+	rcu_read_unlock();
+}
+
+void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->invalidate_range_end)
+			mn->ops->invalidate_range_end(mn, mm, start, end);
+	}
+	rcu_read_unlock();
+}
+
+static int do_mmu_notifier_register(struct mmu_notifier *mn,
+				    struct mm_struct *mm,
+				    int take_mmap_sem)
+{
+	struct mmu_notifier_mm *mmu_notifier_mm;
+	int ret;
+
+	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
+	if (take_mmap_sem)
+		down_write(&mm->mmap_sem);
+	ret = mm_take_all_locks(mm);
+	if (unlikely(ret))
+		goto out_cleanup;
+
+	if (!mm_has_notifiers(mm)) {
+		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+		spin_lock_init(&mmu_notifier_mm->lock);
+		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
+	}
+	atomic_inc(&mm->mm_count);
+
+	/*
+	 * Serialize the update against mmu_notifier_unregister. A
+	 * side note: mmu_notifier_release can't run concurrently with
+	 * us because we hold the mm_users pin (either implicitly as
+	 * current->mm or explicitly with get_task_mm() or similar).
+	 * We can't race against any other mmu notifier method either
+	 * thanks to mm_take_all_locks().
+	 */
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+	spin_unlock(&mm->mmu_notifier_mm->lock);
+
+	mm_drop_all_locks(mm);
+out_cleanup:
+	if (take_mmap_sem)
+		up_write(&mm->mmap_sem);
+	/* kfree() does nothing if mmu_notifier_mm is NULL */
+	kfree(mmu_notifier_mm);
+out:
+	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+	return ret;
+}
+
+/*
+ * Must not hold mmap_sem nor any other VM related lock when calling
+ * this registration function. Must also ensure mm_users can't go down
+ * to zero while this runs to avoid races with mmu_notifier_release,
+ * so mm has to be current->mm or the mm should be pinned safely such
+ * as with get_task_mm(). If the mm is not current->mm, the mm_users
+ * pin should be released by calling mmput after mmu_notifier_register
+ * returns. mmu_notifier_unregister must be always called to
+ * unregister the notifier. mm_count is automatically pinned to allow
+ * mmu_notifier_unregister to safely run at any time later, before or
+ * after exit_mmap. ->release will always be called before exit_mmap
+ * frees the pages.
+ */
+int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	return do_mmu_notifier_register(mn, mm, 1);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_register);
+
+/*
+ * Same as mmu_notifier_register but here the caller must hold the
+ * mmap_sem in write mode.
+ */
+int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	return do_mmu_notifier_register(mn, mm, 0);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_register);
+
+/* this is called after the last mmu_notifier_unregister() returned */
+void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
+	kfree(mm->mmu_notifier_mm);
+	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
+}
+
+/*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+ * running mmu notifiers with RCU and against mmu_notifier_unregister
+ * with the unregister lock + RCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister returned we're guaranteed
+ * that ->release or any other method can't run anymore.
+ */
+void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	if (!hlist_unhashed(&mn->hlist)) {
+		hlist_del_rcu(&mn->hlist);
+
+		/*
+		 * RCU here will force exit_mmap to wait ->release to finish
+		 * before freeing the pages.
+		 */
+		rcu_read_lock();
+		spin_unlock(&mm->mmu_notifier_mm->lock);
+		/*
+		 * exit_mmap will block in mmu_notifier_release to
+		 * guarantee ->release is called before freeing the
+		 * pages.
+		 */
+		if (mn->ops->release)
+			mn->ops->release(mn, mm);
+		rcu_read_unlock();
+	} else
+		spin_unlock(&mm->mmu_notifier_mm->lock);
+
+	/*
+	 * Wait any running method to finish, of course including
+	 * ->release if it was run by mmu_notifier_relase instead of us.
+	 */
+	synchronize_rcu();
+
+	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+	mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
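
A secondary-MMU user of this API embeds a struct mmu_notifier, points it at a struct mmu_notifier_ops, and registers it against the mm it shadows; the callbacks then fire from the invalidation hooks added throughout this patch. A hedged sketch with made-up driver names (only the mmu_notifier calls come from this patch; mydrv_flush_sptes() is assumed to be provided by the driver):

/* Sketch of a hypothetical secondary-MMU driver. */
struct mydrv {
	struct mmu_notifier mn;
	/* ... driver-private shadow page table state ... */
};

static void mydrv_flush_sptes(struct mydrv *drv, unsigned long start,
			      unsigned long end);	/* driver-provided */

static void mydrv_invalidate_range_start(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	struct mydrv *drv = container_of(mn, struct mydrv, mn);

	mydrv_flush_sptes(drv, start, end);	/* drop secondary mappings */
}

static const struct mmu_notifier_ops mydrv_notifier_ops = {
	.invalidate_range_start	= mydrv_invalidate_range_start,
	/* .release, .clear_flush_young, .invalidate_page, etc. as needed */
};

static int mydrv_attach(struct mydrv *drv)
{
	drv->mn.ops = &mydrv_notifier_ops;
	return mmu_notifier_register(&drv->mn, current->mm);	/* pins mm_count */
}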
diff --git a/mm/mprotect.c b/mm/mprotect.c
index abd645a3b0a..fded06f923f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -21,6 +21,7 @@
 #include <linux/syscalls.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/mmu_notifier.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -203,10 +204,12 @@ success:
 		dirty_accountable = 1;
 	}
 
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	if (is_vm_hugetlb_page(vma))
 		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
 	else
 		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
diff --git a/mm/mremap.c b/mm/mremap.c
index 08e3c7f2bd1..1a7743923c8 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -74,7 +75,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
+	unsigned long old_start;
 
+	old_start = old_addr;
+	mmu_notifier_invalidate_range_start(vma->vm_mm,
+					    old_start, old_end);
 	if (vma->vm_file) {
 		/*
 		 * Subtle point from Rajesh Venkatasubramanian: before
@@ -116,6 +121,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
+	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3cf3d05b6bd..401d104d2bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3753,23 +3753,6 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 	return find_min_pfn_for_node(MAX_NUMNODES);
 }
 
-/**
- * find_max_pfn_with_active_regions - Find the maximum PFN registered
- *
- * It returns the maximum PFN based on information provided via
- * add_active_range().
- */
-unsigned long __init find_max_pfn_with_active_regions(void)
-{
-	int i;
-	unsigned long max_pfn = 0;
-
-	for (i = 0; i < nr_nodemap_entries; i++)
-		max_pfn = max(max_pfn, early_node_map[i].end_pfn);
-
-	return max_pfn;
-}
-
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
diff --git a/mm/rmap.c b/mm/rmap.c
index 39ae5a9bf38..94a5246a3f9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -287,7 +288,7 @@ static int page_referenced_one(struct page *page,
 	if (vma->vm_flags & VM_LOCKED) {
 		referenced++;
 		*mapcount = 1;	/* break early from loop */
-	} else if (ptep_clear_flush_young(vma, address, pte))
+	} else if (ptep_clear_flush_young_notify(vma, address, pte))
 		referenced++;
 
 	/* Pretend the page is referenced if the task has the
@@ -457,7 +458,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		pte_t entry;
 
 		flush_cache_page(vma, address, pte_pfn(*pte));
-		entry = ptep_clear_flush(vma, address, pte);
+		entry = ptep_clear_flush_notify(vma, address, pte);
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
@@ -666,7 +667,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_dirty(page)) {
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
@@ -705,14 +707,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * skipped over this mm) then we should reactivate it.
 	 */
 	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)))) {
+			(ptep_clear_flush_young_notify(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
-	pteval = ptep_clear_flush(vma, address, pte);
+	pteval = ptep_clear_flush_notify(vma, address, pte);
 
 	/* Move the dirty bit to the physical page now the pte is gone. */
 	if (pte_dirty(pteval))
@@ -837,12 +839,12 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		page = vm_normal_page(vma, address, *pte);
 		BUG_ON(!page || PageAnon(page));
 
-		if (ptep_clear_flush_young(vma, address, pte))
+		if (ptep_clear_flush_young_notify(vma, address, pte))
 			continue;
 
 		/* Nuke the page table entry. */
 		flush_cache_page(vma, address, pte_pfn(*pte));
-		pteval = ptep_clear_flush(vma, address, pte);
+		pteval = ptep_clear_flush_notify(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
 		if (page->index != linear_page_index(vma, address))
diff --git a/mm/shmem.c b/mm/shmem.c
index 952d361774b..c1e5a3b4f75 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1513,7 +1513,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 		inode->i_uid = current->fsuid;
 		inode->i_gid = current->fsgid;
 		inode->i_blocks = 0;
-		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inode->i_generation = get_seconds();
@@ -1528,6 +1527,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 			init_special_inode(inode, mode, dev);
 			break;
 		case S_IFREG:
+			inode->i_mapping->a_ops = &shmem_aops;
 			inode->i_op = &shmem_inode_operations;
 			inode->i_fop = &shmem_file_operations;
 			mpol_shared_policy_init(&info->policy,
@@ -1929,6 +1929,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 			return error;
 		}
 		unlock_page(page);
+		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_op = &shmem_symlink_inode_operations;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
diff --git a/mm/swap.c b/mm/swap.c
index dd89234ee51..7417a2adbe5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -278,9 +278,10 @@ int lru_add_drain_all(void)
  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
  * for the remainder of the operation.
  *
- * The locking in this function is against shrink_cache(): we recheck the
- * page count inside the lock to see whether shrink_cache grabbed the page
- * via the LRU.  If it did, give up: shrink_cache will free it.
+ * The locking in this function is against shrink_inactive_list(): we recheck
+ * the page count inside the lock to see whether shrink_inactive_list()
+ * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
+ * will free it.
  */
 void release_pages(struct page **pages, int nr, int cold)
 {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6beb6251e99..bb7f79641f9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -656,8 +656,8 @@ static int unuse_mm(struct mm_struct *mm,
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		/*
-		 * Activate page so shrink_cache is unlikely to unmap its
-		 * ptes while lock is dropped, so swapoff can make progress.
+		 * Activate page so shrink_inactive_list is unlikely to unmap
+		 * its ptes while lock is dropped, so swapoff can make progress.
 		 */
 		activate_page(page);
 		unlock_page(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f71761bc4b..75be453628b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1408,7 +1408,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 	}
-	/* top priority shrink_caches still had more to do? don't OOM, then */
+	/* top priority shrink_zones still had more to do? don't OOM, then */
 	if (!sc->all_unreclaimable && scan_global_lru(sc))
 		ret = nr_reclaimed;
 out:
@@ -1979,7 +1979,7 @@ module_init(kswapd_init)
 int zone_reclaim_mode __read_mostly;
 
 #define RECLAIM_OFF 0
-#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
+#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
 #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
 