Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	38	++++++++++++++++++++++++++++----------
1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 6446c6134b04..eea8eefd51a8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)	(0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -60,6 +64,13 @@ pgprot_t protection_map[16] = {
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
@@ -96,7 +107,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
@@ -105,7 +116,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 		 * which are reclaimable, under pressure.  The dentry
 		 * cache and most inode caches should fall into this
 		 */
-		free += atomic_read(&slab_reclaim_pages);
+		free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 		/*
 		 * Leave the last 3% for root
@@ -913,6 +924,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE)
@@ -1090,12 +1105,6 @@ munmap_back:
 		goto free_vma;
 	}
 
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -1113,6 +1122,10 @@ munmap_back:
 	pgoff = vma->vm_pgoff;
 	vm_flags = vma->vm_flags;
 
+	if (vma_wants_writenotify(vma))
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
 		file = vma->vm_file;
@@ -1859,6 +1872,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	unsigned long flags;
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
+	int error;
 
 	len = PAGE_ALIGN(len);
 	if (!len)
@@ -1867,6 +1881,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
+	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/*
 	 * mlock MCL_FUTURE?
 	 */
@@ -1907,8 +1927,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (security_vm_enough_memory(len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
 	/* Can we just expand an old private anonymous mapping? */
 	if (vma_merge(mm, prev, addr, addr + len, flags,
 			NULL, NULL, pgoff, NULL))
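
Note (not part of the diff above): arch_mmap_check() is introduced as a no-op default, so an architecture that needs to veto certain mappings can supply its own definition before mm/mmap.c's fallback is seen, typically from one of its own headers. The following is only a minimal sketch of such an override; the MY_MIN_MAP_ADDR constant and the particular check are illustrative assumptions, not code from the patch or from any real port.

/* Illustrative sketch only -- an arch-provided arch_mmap_check() makes the
 * #ifndef fallback in mm/mmap.c unused.  MY_MIN_MAP_ADDR is a hypothetical
 * lower bound, not a real kernel symbol.
 */
#define MY_MIN_MAP_ADDR	0x00010000UL		/* assumed lowest allowed address */

static inline int my_arch_mmap_check(unsigned long addr, unsigned long len,
				     unsigned long flags)
{
	/* Reject requests that would place the mapping below the assumed bound. */
	if (addr < MY_MIN_MAP_ADDR)
		return -EINVAL;
	return 0;
}
#define arch_mmap_check(addr, len, flags)	my_arch_mmap_check(addr, len, flags)

With vm_get_page_prot() exported as above, out-of-tree or module code could, for example, derive protections in its ->mmap handler via vma->vm_page_prot = vm_get_page_prot(vma->vm_flags) rather than indexing protection_map[] directly.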