path: root/mm/mmap.c
author    Rik van Riel <riel@redhat.com>                   2008-10-18 23:26:50 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-20 11:52:31 -0400
commit    ba470de43188cdbff795b5da43a1474523c6c2fb (patch)
tree      0477460fa8c3e61edd9f1534cd2193656e586f8b /mm/mmap.c
parent    8edb08caf68184fb170f4f69c7445929e199eaea (diff)
mmap: handle mlocked pages during map, remap, unmap
Originally by Nick Piggin <npiggin@suse.de>

Remove mlocked pages from the LRU using "unevictable infrastructure" during
mmap(), munmap(), mremap() and truncate(). Try to move back to normal LRU
lists on munmap() when the last mlocked mapping is removed. Remove
PageMlocked() status when a page is truncated from a file.

[akpm@linux-foundation.org: cleanup]
[kamezawa.hiroyu@jp.fujitsu.com: fix double unlock_page()]
[kosaki.motohiro@jp.fujitsu.com: split LRU: munlock rework]
[lee.schermerhorn@hp.com: mlock: fix __mlock_vma_pages_range comment block]
[akpm@linux-foundation.org: remove bogus kerneldoc token]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
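For context on how these kernel paths are reached from userspace: mapping with MAP_LOCKED takes the VM_LOCKED branch that this patch adds to do_mmap_pgoff(), and unmapping the locked range runs the new munlock pass in do_munmap(). A minimal demo harness, assuming a Linux system where RLIMIT_MEMLOCK permits locking four pages (illustrative only, not part of the patch):

/* Demo harness (not part of the patch): MAP_LOCKED at mmap() time sets
 * VM_LOCKED, so the kernel runs mlock_vma_pages_range(); munmap() of
 * the locked range runs the munlock pass added to do_munmap(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 * 4096;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");         /* often RLIMIT_MEMLOCK too low */
                return 1;
        }
        memset(p, 0, len);              /* pages already present and unevictable */

        if (munmap(p, len))             /* locked range: munlock pass runs first */
                perror("munmap");
        return 0;
}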
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  71
1 file changed, 52 insertions(+), 19 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7bdfd2661f17..505a454f365e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -970,6 +970,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 			return -EPERM;
 		vm_flags |= VM_LOCKED;
 	}
+
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
@@ -1137,10 +1138,12 @@ munmap_back:
 	 * The VM_SHARED test is necessary because shmem_zero_setup
 	 * will create the file object for a shared anonymous map below.
 	 */
-	if (!file && !(vm_flags & VM_SHARED) &&
-	    vma_merge(mm, prev, addr, addr + len, vm_flags,
-					NULL, NULL, pgoff, NULL))
-		goto out;
+	if (!file && !(vm_flags & VM_SHARED)) {
+		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+					NULL, NULL, pgoff, NULL);
+		if (vma)
+			goto out;
+	}
 
 	/*
 	 * Determine the object being mapped and call the appropriate
@@ -1222,10 +1225,14 @@ out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
-	}
-	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+		/*
+		 * makes pages present; downgrades, drops, reacquires mmap_sem
+		 */
+		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+		if (nr_pages < 0)
+			return nr_pages;	/* vma gone! */
+		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
 
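The accounting contract here, as I read the hunk: mlock_vma_pages_range() may downgrade, drop and reacquire mmap_sem, so a negative return means the vma vanished in the meantime and the mmap must fail, while a non-negative return is the number of pages that were not mlocked and so must not be charged to mm->locked_vm. A toy model of just that arithmetic, with mock_mlock_range() as a hypothetical stand-in for the kernel function:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-in: returns pages NOT mlocked (and therefore not
 * to be charged), or a negative errno-style value if the vma vanished
 * while mmap_sem was dropped. */
static long mock_mlock_range(long pages_not_locked, int vma_gone)
{
        return vma_gone ? -11 /* -EAGAIN */ : pages_not_locked;
}

int main(void)
{
        unsigned long locked_vm = 0;
        unsigned long len = 8UL << PAGE_SHIFT;  /* an 8-page mapping */

        long nr_pages = mock_mlock_range(3, 0); /* say 3 pages were skipped */
        if (nr_pages < 0)
                return 1;                       /* vma gone! */
        locked_vm += (len >> PAGE_SHIFT) - nr_pages;

        printf("locked_vm charged: %lu pages\n", locked_vm);   /* 5 */
        return 0;
}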
@@ -1698,8 +1705,10 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 		return vma;
 	if (!prev || expand_stack(prev, addr))
 		return NULL;
-	if (prev->vm_flags & VM_LOCKED)
-		make_pages_present(addr, prev->vm_end);
+	if (prev->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return prev;
 }
 #else
@@ -1725,8 +1734,10 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED)
-		make_pages_present(addr, start);
+	if (vma->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(vma, addr, start) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return vma;
 }
 #endif
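Both find_extend_vma() variants now treat a failed mlock_vma_pages_range() as "vma gone" and refuse to return the vma. From userspace, the locked-stack-growth case arises under mlockall(MCL_FUTURE); a small demo, assuming RLIMIT_MEMLOCK (or CAP_IPC_LOCK) allows locking the whole address space (illustrative, not part of the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void grow(int depth)
{
        char buf[16384];                /* force the stack vma to extend */

        memset(buf, depth, sizeof(buf));
        if (depth > 0)
                grow(depth - 1);
}

int main(void)
{
        if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
                perror("mlockall");     /* needs CAP_IPC_LOCK or a high limit */
                return 1;
        }
        grow(16);                       /* ~256 KiB of locked stack growth */
        puts("stack grown under mlockall");
        munlockall();
        return 0;
}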
@@ -1745,8 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 		long nrpages = vma_pages(vma);
 
 		mm->total_vm -= nrpages;
-		if (vma->vm_flags & VM_LOCKED)
-			mm->locked_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
@@ -1912,6 +1921,20 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	vma = prev? prev->vm_next: mm->mmap;
 
 	/*
+	 * unlock any mlock()ed ranges before detaching vmas
+	 */
+	if (mm->locked_vm) {
+		struct vm_area_struct *tmp = vma;
+		while (tmp && tmp->vm_start < end) {
+			if (tmp->vm_flags & VM_LOCKED) {
+				mm->locked_vm -= vma_pages(tmp);
+				munlock_vma_pages_all(tmp);
+			}
+			tmp = tmp->vm_next;
+		}
+	}
+
+	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
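The new pass walks every vma overlapping the range being unmapped and munlocks it before the vmas are detached, so mm->locked_vm stays consistent even for partial unmaps (split_vma() has already split at the boundaries by this point). A self-contained toy of the walk and the bookkeeping, using simplified stand-in structures rather than kernel definitions:

#include <stdio.h>

/* Simplified stand-ins, not kernel definitions. */
struct toy_vma {
        unsigned long vm_start, vm_end;
        int vm_locked;
        struct toy_vma *vm_next;
};

static unsigned long toy_pages(const struct toy_vma *v)
{
        return (v->vm_end - v->vm_start) >> 12;
}

int main(void)
{
        /* three vmas; a and c are mlocked, we unmap [0x1000, 0x8000) */
        struct toy_vma c = { 0x9000, 0xb000, 1, NULL };
        struct toy_vma b = { 0x5000, 0x9000, 0, &c };
        struct toy_vma a = { 0x1000, 0x5000, 1, &b };
        unsigned long locked_vm = toy_pages(&a) + toy_pages(&c);
        unsigned long end = 0x8000;

        /* mirror of the pass above: munlock before detaching */
        for (struct toy_vma *tmp = &a; tmp && tmp->vm_start < end;
             tmp = tmp->vm_next) {
                if (tmp->vm_locked) {
                        locked_vm -= toy_pages(tmp);    /* munlock_vma_pages_all() */
                        tmp->vm_locked = 0;
                }
        }
        printf("locked_vm left: %lu pages\n", locked_vm);      /* 2: vma c untouched */
        return 0;
}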
@@ -2023,8 +2046,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
-	if (vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL))
+	vma = vma_merge(mm, prev, addr, addr + len, flags,
+					NULL, NULL, pgoff, NULL);
+	if (vma)
 		goto out;
 
 	/*
@@ -2046,8 +2070,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
+		if (!mlock_vma_pages_range(vma, addr, addr + len))
+			mm->locked_vm += (len >> PAGE_SHIFT);
 	}
 	return addr;
 }
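In do_brk() the charge to mm->locked_vm is now made only when mlock_vma_pages_range() returns zero, i.e. full success. This path is reached when the heap grows while mlockall(MCL_FUTURE) is in effect; a small demo, assuming glibc's sbrk() still goes through the brk() syscall (illustrative, not part of the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
                perror("mlockall");
                return 1;
        }
        void *old = sbrk(64 * 1024);    /* heap growth -> do_brk() with VM_LOCKED */
        if (old == (void *)-1) {
                perror("sbrk");
                return 1;
        }
        memset(old, 0, 64 * 1024);      /* already resident and locked */
        puts("heap grown under MCL_FUTURE");
        munlockall();
        return 0;
}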
@@ -2058,7 +2082,7 @@ EXPORT_SYMBOL(do_brk);
 void exit_mmap(struct mm_struct *mm)
 {
 	struct mmu_gather *tlb;
-	struct vm_area_struct *vma = mm->mmap;
+	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
@@ -2066,6 +2090,15 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
+	if (mm->locked_vm) {
+		vma = mm->mmap;
+		while (vma) {
+			if (vma->vm_flags & VM_LOCKED)
+				munlock_vma_pages_all(vma);
+			vma = vma->vm_next;
+		}
+	}
+	vma = mm->mmap;
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);