Diffstat (limited to 'mm/nommu.c')
-rw-r--r--  mm/nommu.c | 115
1 file changed, 66 insertions(+), 49 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index b51eadf6d952..7296360fc057 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -59,6 +59,7 @@
 #endif
 
 void *high_memory;
+EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long highest_memmap_pfn;
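
Exporting high_memory makes the !MMU end-of-lowmem marker visible to loadable modules, which previously could only reference it from built-in code. A minimal sketch of the kind of bounds check a module might do (the helper is illustrative, not part of this patch):

	#include <linux/mm.h>

	/* Illustrative only: decide whether an address sits below high_memory. */
	static bool my_addr_is_lowmem(const void *addr)
	{
		return addr < high_memory;
	}
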
@@ -213,6 +214,39 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   int write, int force, struct page **pages,
+			   int *locked)
+{
+	return get_user_pages(tsk, mm, start, nr_pages, write, force,
+			      pages, NULL);
+}
+EXPORT_SYMBOL(get_user_pages_locked);
+
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+			       unsigned long start, unsigned long nr_pages,
+			       int write, int force, struct page **pages,
+			       unsigned int gup_flags)
+{
+	long ret;
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
+			     pages, NULL);
+	up_read(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(__get_user_pages_unlocked);
+
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+			     unsigned long start, unsigned long nr_pages,
+			     int write, int force, struct page **pages)
+{
+	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
+					 force, pages, 0);
+}
+EXPORT_SYMBOL(get_user_pages_unlocked);
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
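
On !MMU these are thin wrappers, but they keep the calling convention in step with the MMU build: get_user_pages_unlocked() takes and releases mmap_sem itself, so the caller must not already hold it. A minimal usage sketch under that assumption (the helper and its single-page pin are illustrative, not taken from this patch):

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/sched.h>

	/* Illustrative only: pin one writable page of the current task's buffer. */
	static long pin_one_user_page(unsigned long user_addr, struct page **page)
	{
		long got;

		/* mmap_sem is taken and dropped inside the helper. */
		got = get_user_pages_unlocked(current, current->mm, user_addr, 1,
					      1 /* write */, 0 /* force */, page);
		if (got == 1) {
			/* ... use kmap(*page) / kunmap(*page) here ... */
			page_cache_release(*page);
		}
		return got;
	}
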
@@ -946,9 +980,6 @@ static int validate_mmap_request(struct file *file,
 		return -EOVERFLOW;
 
 	if (file) {
-		/* validate file mapping requests */
-		struct address_space *mapping;
-
 		/* files must support mmap */
 		if (!file->f_op->mmap)
 			return -ENODEV;
@@ -957,28 +988,22 @@ static int validate_mmap_request(struct file *file,
 		 * - we support chardevs that provide their own "memory"
 		 * - we support files/blockdevs that are memory backed
 		 */
-		mapping = file->f_mapping;
-		if (!mapping)
-			mapping = file_inode(file)->i_mapping;
-
-		capabilities = 0;
-		if (mapping && mapping->backing_dev_info)
-			capabilities = mapping->backing_dev_info->capabilities;
-
-		if (!capabilities) {
+		if (file->f_op->mmap_capabilities) {
+			capabilities = file->f_op->mmap_capabilities(file);
+		} else {
 			/* no explicit capabilities set, so assume some
 			 * defaults */
 			switch (file_inode(file)->i_mode & S_IFMT) {
 			case S_IFREG:
 			case S_IFBLK:
-				capabilities = BDI_CAP_MAP_COPY;
+				capabilities = NOMMU_MAP_COPY;
 				break;
 
 			case S_IFCHR:
 				capabilities =
-					BDI_CAP_MAP_DIRECT |
-					BDI_CAP_READ_MAP |
-					BDI_CAP_WRITE_MAP;
+					NOMMU_MAP_DIRECT |
+					NOMMU_MAP_READ |
+					NOMMU_MAP_WRITE;
 				break;
 
 			default:
@@ -989,9 +1014,9 @@ static int validate_mmap_request(struct file *file,
 		/* eliminate any capabilities that we can't support on this
 		 * device */
 		if (!file->f_op->get_unmapped_area)
-			capabilities &= ~BDI_CAP_MAP_DIRECT;
+			capabilities &= ~NOMMU_MAP_DIRECT;
 		if (!file->f_op->read)
-			capabilities &= ~BDI_CAP_MAP_COPY;
+			capabilities &= ~NOMMU_MAP_COPY;
 
 		/* The file shall have been opened with read permission. */
 		if (!(file->f_mode & FMODE_READ))
@@ -1010,29 +1035,29 @@ static int validate_mmap_request(struct file *file,
 			if (locks_verify_locked(file))
 				return -EAGAIN;
 
-			if (!(capabilities & BDI_CAP_MAP_DIRECT))
+			if (!(capabilities & NOMMU_MAP_DIRECT))
 				return -ENODEV;
 
 			/* we mustn't privatise shared mappings */
-			capabilities &= ~BDI_CAP_MAP_COPY;
+			capabilities &= ~NOMMU_MAP_COPY;
 		} else {
 			/* we're going to read the file into private memory we
 			 * allocate */
-			if (!(capabilities & BDI_CAP_MAP_COPY))
+			if (!(capabilities & NOMMU_MAP_COPY))
 				return -ENODEV;
 
 			/* we don't permit a private writable mapping to be
 			 * shared with the backing device */
 			if (prot & PROT_WRITE)
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 		}
 
-		if (capabilities & BDI_CAP_MAP_DIRECT) {
-			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
-			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
-			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
+		if (capabilities & NOMMU_MAP_DIRECT) {
+			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
+			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
+			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 			    ) {
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 				if (flags & MAP_SHARED) {
 					printk(KERN_WARNING
 					       "MAP_SHARED not completely supported on !MMU\n");
@@ -1049,21 +1074,21 @@ static int validate_mmap_request(struct file *file,
 		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 			/* handle implication of PROT_EXEC by PROT_READ */
 			if (current->personality & READ_IMPLIES_EXEC) {
-				if (capabilities & BDI_CAP_EXEC_MAP)
+				if (capabilities & NOMMU_MAP_EXEC)
 					prot |= PROT_EXEC;
 			}
 		} else if ((prot & PROT_READ) &&
 			 (prot & PROT_EXEC) &&
-			 !(capabilities & BDI_CAP_EXEC_MAP)
+			 !(capabilities & NOMMU_MAP_EXEC)
 			 ) {
 			/* backing file is not executable, try to copy */
-			capabilities &= ~BDI_CAP_MAP_DIRECT;
+			capabilities &= ~NOMMU_MAP_DIRECT;
 		}
 	} else {
 		/* anonymous mappings are always memory backed and can be
 		 * privately mapped
 		 */
-		capabilities = BDI_CAP_MAP_COPY;
+		capabilities = NOMMU_MAP_COPY;
 
 		/* handle PROT_EXEC implication by PROT_READ */
 		if ((prot & PROT_READ) &&
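
With backing_dev_info no longer consulted, a driver that wants its file mapped directly on !MMU now advertises that through the new file_operations hook checked above, assuming the signature implied by the call site (unsigned return, struct file * argument). A sketch of what such a character-device driver might declare (names and flag choice are illustrative; a real driver also needs .mmap and .get_unmapped_area, which validate_mmap_request() checks before honouring NOMMU_MAP_DIRECT):

	#include <linux/fs.h>
	#include <linux/module.h>

	/* Illustrative only: memory-like chardev, directly mappable for read/write. */
	static unsigned mydrv_mmap_capabilities(struct file *file)
	{
		return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	}

	static const struct file_operations mydrv_fops = {
		.owner			= THIS_MODULE,
		.mmap_capabilities	= mydrv_mmap_capabilities,
	};
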
@@ -1095,7 +1120,7 @@ static unsigned long determine_vm_flags(struct file *file,
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
 	/* vm_flags |= mm->def_flags; */
 
-	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
+	if (!(capabilities & NOMMU_MAP_DIRECT)) {
 		/* attempt to share read-only copies of mapped file chunks */
 		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 		if (file && !(prot & PROT_WRITE))
@@ -1104,7 +1129,7 @@ static unsigned long determine_vm_flags(struct file *file,
 		/* overlay a shareable mapping on the backing device or inode
 		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
 		 * romfs/cramfs */
-		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
+		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
 		if (flags & MAP_SHARED)
 			vm_flags |= VM_SHARED;
 	}
@@ -1157,7 +1182,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	 * shared mappings on devices or memory
 	 * - VM_MAYSHARE will be set if it may attempt to share
 	 */
-	if (capabilities & BDI_CAP_MAP_DIRECT) {
+	if (capabilities & NOMMU_MAP_DIRECT) {
 		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
 		if (ret == 0) {
 			/* shouldn't return success if we're not sharing */
@@ -1346,7 +1371,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
 			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
 				/* new mapping is not a subset of the region */
-				if (!(capabilities & BDI_CAP_MAP_DIRECT))
+				if (!(capabilities & NOMMU_MAP_DIRECT))
 					goto sharing_violation;
 				continue;
 			}
@@ -1385,7 +1410,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 		 * - this is the hook for quasi-memory character devices to
 		 * tell us the location of a shared mapping
 		 */
-		if (capabilities & BDI_CAP_MAP_DIRECT) {
+		if (capabilities & NOMMU_MAP_DIRECT) {
 			addr = file->f_op->get_unmapped_area(file, addr, len,
 							     pgoff, flags);
 			if (IS_ERR_VALUE(addr)) {
@@ -1397,10 +1422,10 @@ unsigned long do_mmap_pgoff(struct file *file,
 				 * the mapping so we'll have to attempt to copy
 				 * it */
 				ret = -ENODEV;
-				if (!(capabilities & BDI_CAP_MAP_COPY))
+				if (!(capabilities & NOMMU_MAP_COPY))
 					goto error_just_free;
 
-				capabilities &= ~BDI_CAP_MAP_DIRECT;
+				capabilities &= ~NOMMU_MAP_DIRECT;
 			} else {
 				vma->vm_start = region->vm_start = addr;
 				vma->vm_end = region->vm_end = addr + len;
@@ -1411,7 +1436,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	vma->vm_region = region;
 
 	/* set up the mapping
-	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
+	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
 	 */
 	if (file && vma->vm_flags & VM_SHARED)
 		ret = do_mmap_shared_file(vma);
@@ -1894,7 +1919,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
-	unsigned long free, allowed, reserve;
+	long free, allowed, reserve;
 
 	vm_acct_memory(pages);
 
@@ -1958,7 +1983,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 */
 	if (mm) {
 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
-		allowed -= min(mm->total_vm / 32, reserve);
+		allowed -= min_t(long, mm->total_vm / 32, reserve);
 	}
 
 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
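
The signed types matter because "allowed" can legitimately dip below zero once the reserves are subtracted; as unsigned long it would instead wrap to a huge value and the final vm_committed_as comparison would always pass. min_t(long, ...) is used because the kernel's min() warns when its arguments have different types (mm->total_vm is unsigned long, reserve is now long). A standalone illustration of the wraparound, with made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long allowed_u = 100;	/* old: unsigned arithmetic */
		long allowed_s = 100;		/* new: signed arithmetic */
		unsigned long committed = 150;

		allowed_u -= 200;	/* wraps to ULONG_MAX - 99 */
		allowed_s -= 200;	/* becomes -100, as intended */

		printf("unsigned check passes: %d\n", committed < allowed_u);       /* 1 */
		printf("signed check passes:   %d\n", (long)committed < allowed_s); /* 0 */
		return 0;
	}
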
@@ -1983,14 +2008,6 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_map_pages);
 
-int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
-			     unsigned long size, pgoff_t pgoff)
-{
-	BUG();
-	return 0;
-}
-EXPORT_SYMBOL(generic_file_remap_pages);
-
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, int write)
 {