Diffstat (limited to 'mm/mmap.c')

-rw-r--r--	mm/mmap.c	81

1 file changed, 56 insertions(+), 25 deletions(-)

--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -410,7 +410,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 }
 
-static inline void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma)
 {
 	struct file * file;
 
@@ -662,8 +662,6 @@ again:			remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
-
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
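
The VM_SPECIAL mask deleted above is not dropped from the kernel: the new mlock code has to test the same bits, so the definition is presumably just relocated to a shared header (include/linux/mm.h in trees that carry this series) rather than staying private to mm/mmap.c:

	/* Removed from mm/mmap.c above; assumed relocated to include/linux/mm.h
	 * so that mm/mlock.c can also recognise "special" vmas that must never
	 * be mlocked or merged. */
	#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
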
@@ -972,6 +970,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 			return -EPERM;
 		vm_flags |= VM_LOCKED;
 	}
+
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
@@ -1139,10 +1138,12 @@ munmap_back:
 	 * The VM_SHARED test is necessary because shmem_zero_setup
 	 * will create the file object for a shared anonymous map below.
 	 */
-	if (!file && !(vm_flags & VM_SHARED) &&
-	    vma_merge(mm, prev, addr, addr + len, vm_flags,
-					NULL, NULL, pgoff, NULL))
-		goto out;
+	if (!file && !(vm_flags & VM_SHARED)) {
+		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+					NULL, NULL, pgoff, NULL);
+		if (vma)
+			goto out;
+	}
 
 	/*
 	 * Determine the object being mapped and call the appropriate
@@ -1224,10 +1225,14 @@ out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
-	}
-	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+		/*
+		 * makes pages present; downgrades, drops, reacquires mmap_sem
+		 */
+		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+		if (nr_pages < 0)
+			return nr_pages;	/* vma gone! */
+		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
 
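A note on the helper these mlock hunks lean on: judging from the call sites in this diff, mlock_vma_pages_range() faults in and mlocks the pages of [addr, addr + len), and the caller must be prepared for the vma to have been torn down while mmap_sem was dropped. A sketch of the assumed interface (the helper itself comes from the companion mlock patches, not from this diff):

	/*
	 * Sketch of the interface assumed above; the real declaration is
	 * added to mm/internal.h by the companion mlock patches.  Inferred
	 * semantics: returns a negative errno if the vma vanished while
	 * mmap_sem was dropped, otherwise the number of pages in
	 * [start, end) that were NOT mlocked (non-zero for "special" vmas)
	 * and so must not be charged to mm->locked_vm.
	 */
	extern long mlock_vma_pages_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end);
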
@@ -1586,7 +1591,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
  * vma is the last one with address > vma->vm_end. Have to extend vma.
  */
 #ifndef CONFIG_IA64
-static inline
+static
 #endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1636,7 +1641,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start. Have to extend vma.
  */
-static inline int expand_downwards(struct vm_area_struct *vma,
+static int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	int error;
@@ -1698,10 +1703,12 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	vma = find_vma_prev(mm, addr, &prev);
 	if (vma && (vma->vm_start <= addr))
 		return vma;
-	if (!prev || expand_stack(prev, addr))
+	if (expand_stack(prev, addr))
 		return NULL;
-	if (prev->vm_flags & VM_LOCKED)
-		make_pages_present(addr, prev->vm_end);
+	if (prev->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return prev;
 }
 #else
@@ -1727,8 +1734,10 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 	start = vma->vm_start;
 	if (expand_stack(vma, addr))
 		return NULL;
-	if (vma->vm_flags & VM_LOCKED)
-		make_pages_present(addr, start);
+	if (vma->vm_flags & VM_LOCKED) {
+		if (mlock_vma_pages_range(vma, addr, start) < 0)
+			return NULL;	/* vma gone! */
+	}
 	return vma;
 }
 #endif
@@ -1747,8 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 		long nrpages = vma_pages(vma);
 
 		mm->total_vm -= nrpages;
-		if (vma->vm_flags & VM_LOCKED)
-			mm->locked_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
@@ -1914,6 +1921,20 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	vma = prev? prev->vm_next: mm->mmap;
 
 	/*
+	 * unlock any mlock()ed ranges before detaching vmas
+	 */
+	if (mm->locked_vm) {
+		struct vm_area_struct *tmp = vma;
+		while (tmp && tmp->vm_start < end) {
+			if (tmp->vm_flags & VM_LOCKED) {
+				mm->locked_vm -= vma_pages(tmp);
+				munlock_vma_pages_all(tmp);
+			}
+			tmp = tmp->vm_next;
+		}
+	}
+
+	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
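
The munmap side is the mirror image of the mmap hunks above: while mmap_sem is still held and before the vmas are detached, every VM_LOCKED vma overlapping the range is uncounted from locked_vm and its pages are munlocked, so no page reaches unmap_region() still marked unevictable. munlock_vma_pages_all() is expected to be a thin whole-vma wrapper, roughly:

	/*
	 * Rough shape of the helper used above, as the companion mlock
	 * patches provide it (a whole-vma convenience wrapper; details
	 * may differ).
	 */
	extern void munlock_vma_pages_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end);

	#define munlock_vma_pages_all(vma)	\
		munlock_vma_pages_range(vma, (vma)->vm_start, (vma)->vm_end)
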
@@ -2025,8 +2046,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
-	if (vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL))
+	vma = vma_merge(mm, prev, addr, addr + len, flags,
+					NULL, NULL, pgoff, NULL);
+	if (vma)
 		goto out;
 
 	/*
@@ -2048,8 +2070,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
-		mm->locked_vm += len >> PAGE_SHIFT;
-		make_pages_present(addr, addr + len);
+		if (!mlock_vma_pages_range(vma, addr, addr + len))
+			mm->locked_vm += (len >> PAGE_SHIFT);
 	}
 	return addr;
 }
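
Note the asymmetry with the do_mmap_pgoff() hunk earlier: do_brk() charges locked_vm only when mlock_vma_pages_range() returns 0 (the whole range locked), while the mmap path subtracts the unlocked remainder. A side-by-side restatement of the two accounting styles, using hypothetical helper names for illustration only:

	/* Hypothetical helpers restating the two accounting styles above;
	 * nr is mlock_vma_pages_range()'s non-negative return value. */
	static inline void mmap_style_account(struct mm_struct *mm,
					      long pages, long nr)
	{
		mm->locked_vm += pages - nr;	/* charge exactly what was locked */
	}

	static inline void brk_style_account(struct mm_struct *mm,
					     long pages, long nr)
	{
		if (nr == 0)			/* all-or-nothing */
			mm->locked_vm += pages;
	}
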
@@ -2060,7 +2082,7 @@ EXPORT_SYMBOL(do_brk);
 void exit_mmap(struct mm_struct *mm)
 {
 	struct mmu_gather *tlb;
-	struct vm_area_struct *vma = mm->mmap;
+	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
@@ -2068,6 +2090,15 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 	mmu_notifier_release(mm);
 
+	if (mm->locked_vm) {
+		vma = mm->mmap;
+		while (vma) {
+			if (vma->vm_flags & VM_LOCKED)
+				munlock_vma_pages_all(vma);
+			vma = vma->vm_next;
+		}
+	}
+	vma = mm->mmap;
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);