Diffstat (limited to 'mm/nommu.c')
 mm/nommu.c | 245 +++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 192 insertions(+), 53 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index d99dea31e443..564540662192 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -122,26 +122,50 @@ unsigned int kobjsize(const void *objp)
 }
 
 /*
- * The nommu dodgy version :-)
+ * get a list of pages in an address range belonging to the specified process
+ * and indicate the VMA that covers each page
+ * - this is potentially dodgy as we may end up incrementing the page count of
+ *   a slab page or a secondary page from a compound page
+ * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	unsigned long start, int len, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
+	struct vm_area_struct *vma;
+	unsigned long vm_flags;
 	int i;
-	static struct vm_area_struct dummy_vma;
+
+	/* calculate required read or write permissions.
+	 * - if 'force' is set, we only require the "MAY" flags.
+	 */
+	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
 	for (i = 0; i < len; i++) {
+		vma = find_vma(mm, start);
+		if (!vma)
+			goto finish_or_fault;
+
+		/* protect what we can, including chardevs */
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
+		    !(vm_flags & vma->vm_flags))
+			goto finish_or_fault;
+
 		if (pages) {
 			pages[i] = virt_to_page(start);
 			if (pages[i])
				page_cache_get(pages[i]);
 		}
 		if (vmas)
-			vmas[i] = &dummy_vma;
+			vmas[i] = vma;
 		start += PAGE_SIZE;
 	}
-	return(i);
+
+	return i;
+
+finish_or_fault:
+	return i ? : -EFAULT;
 }
 
 EXPORT_SYMBOL(get_user_pages);
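
The rewritten function walks the real VMA list instead of handing back a dummy VMA, and refuses I/O and PFN mappings. The vm_flags arithmetic is compact, so here is a standalone sketch of just that calculation; the flag values and the access_permitted() helper are illustrative, not the kernel's actual VM_* definitions. The point is that when 'force' is set, only the "MAY" capability bits need to be present on the VMA:

	/* standalone sketch (not kernel code) of the permission calculation
	 * performed by the patched get_user_pages(); flag values are
	 * illustrative */
	#include <stdio.h>

	#define VM_READ     0x1
	#define VM_WRITE    0x2
	#define VM_MAYREAD  0x10
	#define VM_MAYWRITE 0x20

	/* non-zero if a VMA with 'vma_flags' permits the access */
	static int access_permitted(unsigned long vma_flags, int write, int force)
	{
		unsigned long vm_flags;

		vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
		/* with 'force', only the "MAY" capability bits are required */
		vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

		return (vm_flags & vma_flags) != 0;
	}

	int main(void)
	{
		/* read-only mapping that could be made writable */
		unsigned long flags = VM_READ | VM_MAYREAD | VM_MAYWRITE;

		printf("write, no force: %d\n", access_permitted(flags, 1, 0)); /* 0 */
		printf("write, force:    %d\n", access_permitted(flags, 1, 1)); /* 1 */
		return 0;
	}

The second case succeeds because VM_MAYWRITE alone satisfies a forced write, mirroring what get_user_pages() now accepts under 'force'.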
@@ -286,6 +310,77 @@ static void show_process_blocks(void)
 }
 #endif /* DEBUG */
 
+/*
+ * add a VMA into a process's mm_struct in the appropriate place in the list
+ * - should be called with mm->mmap_sem held writelocked
+ */
+static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
+{
+	struct vm_list_struct **ppv;
+
+	for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
+		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
+			break;
+
+	vml->next = *ppv;
+	*ppv = vml;
+}
+
+/*
+ * look up the first VMA in which addr resides, NULL if none
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_list_struct *loop, *vml;
+
+	/* search the vm_start ordered list */
+	vml = NULL;
+	for (loop = mm->context.vmlist; loop; loop = loop->next) {
+		if (loop->vma->vm_start > addr)
+			break;
+		vml = loop;
+	}
+
+	if (vml && vml->vma->vm_end > addr)
+		return vml->vma;
+
+	return NULL;
+}
+EXPORT_SYMBOL(find_vma);
+
+/*
+ * find a VMA
+ * - we don't extend stack VMAs under NOMMU conditions
+ */
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+	return find_vma(mm, addr);
+}
+
+/*
+ * look up the first VMA that exactly matches addr
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+						    unsigned long addr)
+{
+	struct vm_list_struct *vml;
+
+	/* search the vm_start ordered list */
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		if (vml->vma->vm_start == addr)
+			return vml->vma;
+		if (vml->vma->vm_start > addr)
+			break;
+	}
+
+	return NULL;
+}
+
+/*
+ * find a VMA in the global tree
+ */
 static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
 {
 	struct vm_area_struct *vma;
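
The point of this hunk is that the per-mm VMA list is now kept sorted by vm_start, which lets find_vma() stop as soon as it passes addr and lets find_vma_exact() bail out early. A compilable userspace sketch of the same ordered-insert and lookup logic; the types are simplified stand-ins, not the kernel's vm_list_struct and vm_area_struct:

	#include <stdio.h>

	struct vma { unsigned long start, end; };
	struct vml { struct vma *vma; struct vml *next; };

	/* insert keeping the list sorted by ascending start address */
	static void add_vma(struct vml **list, struct vml *new)
	{
		struct vml **ppv;

		for (ppv = list; *ppv; ppv = &(*ppv)->next)
			if ((*ppv)->vma->start > new->vma->start)
				break;
		new->next = *ppv;
		*ppv = new;
	}

	/* first VMA containing addr, or NULL: walk until start passes addr,
	 * then check the last candidate's end */
	static struct vma *find_vma(struct vml *list, unsigned long addr)
	{
		struct vml *loop, *vml = NULL;

		for (loop = list; loop; loop = loop->next) {
			if (loop->vma->start > addr)
				break;
			vml = loop;
		}
		return (vml && vml->vma->end > addr) ? vml->vma : NULL;
	}

	int main(void)
	{
		struct vma a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 };
		struct vml na = { &a, NULL }, nb = { &b, NULL };
		struct vml *list = NULL;

		add_vma(&list, &nb);
		add_vma(&list, &na);	/* inserts before nb, keeping order */

		printf("%p\n", (void *) find_vma(list, 0x1800));	/* &a */
		printf("%p\n", (void *) find_vma(list, 0x2800));	/* NULL */
		return 0;
	}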
@@ -305,6 +400,9 @@ static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
 	return NULL;
 }
 
+/*
+ * add a VMA in the global tree
+ */
 static void add_nommu_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *pvma;
@@ -351,6 +449,9 @@ static void add_nommu_vma(struct vm_area_struct *vma)
 	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
 }
 
+/*
+ * delete a VMA from the global list
+ */
 static void delete_nommu_vma(struct vm_area_struct *vma)
 {
 	struct address_space *mapping;
@@ -828,8 +929,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	realalloc += kobjsize(vml);
 	askedalloc += sizeof(*vml);
 
-	vml->next = current->mm->context.vmlist;
-	current->mm->context.vmlist = vml;
+	add_vma_to_mm(current->mm, vml);
 
 	up_write(&nommu_vma_sem);
@@ -908,6 +1008,11 @@ static void put_vma(struct vm_area_struct *vma)
 	}
 }
 
+/*
+ * release a mapping
+ * - under NOMMU conditions the parameters must exactly match the mapping to
+ *   be removed
+ */
 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 {
 	struct vm_list_struct *vml, **parent;
@@ -917,10 +1022,13 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 	printk("do_munmap:\n");
 #endif
 
-	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
+	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
+		if ((*parent)->vma->vm_start > addr)
+			break;
 		if ((*parent)->vma->vm_start == addr &&
 		    ((len == 0) || ((*parent)->vma->vm_end == end)))
 			goto found;
+	}
 
 	printk("munmap of non-mmaped memory by process %d (%s): %p\n",
 	       current->pid, current->comm, (void *) addr);
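
Because the list is ordered, do_munmap() can now stop scanning once vm_start passes addr; the exact-match requirement itself is unchanged. From userspace that requirement looks roughly like the following sketch; on an MMU kernel the partial munmap() would succeed, while under NOMMU it should fail because no mapping starts at p + 4096:

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096 * 4;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* under NOMMU this fails: the region must be released whole */
		if (munmap((char *) p + 4096, 4096) == -1)
			perror("partial munmap");

		/* exact address and length: accepted */
		return munmap(p, len);
	}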
@@ -946,7 +1054,20 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 	return 0;
 }
 
-/* Release all mmaps. */
+asmlinkage long sys_munmap(unsigned long addr, size_t len)
+{
+	int ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_munmap(mm, addr, len);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+/*
+ * Release all mappings
+ */
 void exit_mmap(struct mm_struct * mm)
 {
 	struct vm_list_struct *tmp;
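
sys_munmap() is otherwise unchanged by the move, but its placement next to do_munmap() highlights the locking rule stated in the new comments: callers must hold mmap_sem writelocked around do_munmap(). A sketch of what any other in-kernel caller would have to look like; release_region() is a hypothetical name, not part of this patch:

	/* sketch: hold mmap_sem for write around do_munmap(), as
	 * sys_munmap() above does */
	static int release_region(struct mm_struct *mm, unsigned long addr,
				  size_t len)
	{
		int ret;

		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, addr, len);
		up_write(&mm->mmap_sem);
		return ret;
	}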
@@ -973,37 +1094,26 @@ void exit_mmap(struct mm_struct * mm)
 	}
 }
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
-{
-	int ret;
-	struct mm_struct *mm = current->mm;
-
-	down_write(&mm->mmap_sem);
-	ret = do_munmap(mm, addr, len);
-	up_write(&mm->mmap_sem);
-	return ret;
-}
-
 unsigned long do_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
 
 /*
- * Expand (or shrink) an existing mapping, potentially moving it at the
- * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
+ * expand (or shrink) an existing mapping, potentially moving it at the same
+ * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
  *
- * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
- * This option implies MREMAP_MAYMOVE.
+ * under NOMMU conditions, we only permit changing a mapping's size, and only
+ * as long as it stays within the hole allocated by the kmalloc() call in
+ * do_mmap_pgoff() and the block is not shareable
  *
- * on uClinux, we only permit changing a mapping's size, and only as long as it stays within the
- * hole allocated by the kmalloc() call in do_mmap_pgoff() and the block is not shareable
+ * MREMAP_FIXED is not supported under NOMMU conditions
  */
 unsigned long do_mremap(unsigned long addr,
 			unsigned long old_len, unsigned long new_len,
 			unsigned long flags, unsigned long new_addr)
 {
-	struct vm_list_struct *vml = NULL;
+	struct vm_area_struct *vma;
 
 	/* insanity checks first */
 	if (new_len == 0)
@@ -1012,58 +1122,46 @@ unsigned long do_mremap(unsigned long addr,
 	if (flags & MREMAP_FIXED && new_addr != addr)
 		return (unsigned long) -EINVAL;
 
-	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
-		if (vml->vma->vm_start == addr)
-			goto found;
-
-	return (unsigned long) -EINVAL;
+	vma = find_vma_exact(current->mm, addr);
+	if (!vma)
+		return (unsigned long) -EINVAL;
 
- found:
-	if (vml->vma->vm_end != vml->vma->vm_start + old_len)
+	if (vma->vm_end != vma->vm_start + old_len)
 		return (unsigned long) -EFAULT;
 
-	if (vml->vma->vm_flags & VM_MAYSHARE)
+	if (vma->vm_flags & VM_MAYSHARE)
 		return (unsigned long) -EPERM;
 
 	if (new_len > kobjsize((void *) addr))
 		return (unsigned long) -ENOMEM;
 
 	/* all checks complete - do it */
-	vml->vma->vm_end = vml->vma->vm_start + new_len;
+	vma->vm_end = vma->vm_start + new_len;
 
 	askedalloc -= old_len;
 	askedalloc += new_len;
 
-	return vml->vma->vm_start;
+	return vma->vm_start;
 }
 
-/*
- * Look up the first VMA which satisfies addr < vm_end, NULL if none
- */
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+asmlinkage unsigned long sys_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr)
 {
-	struct vm_list_struct *vml;
-
-	for (vml = mm->context.vmlist; vml; vml = vml->next)
-		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
-			return vml->vma;
+	unsigned long ret;
 
-	return NULL;
+	down_write(&current->mm->mmap_sem);
+	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+	up_write(&current->mm->mmap_sem);
+	return ret;
 }
 
-EXPORT_SYMBOL(find_vma);
-
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 			 unsigned int foll_flags)
 {
 	return NULL;
 }
 
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return NULL;
-}
-
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		    unsigned long to, unsigned long size, pgprot_t prot)
 {
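
do_mremap() now reuses find_vma_exact(), and the rewritten comment spells out the NOMMU rules: in-place resizing only, within the kobjsize() of the backing kmalloc() block, and never on a shareable mapping. A userspace illustration of what that permits, as a hedged sketch: whether the shrink actually succeeds depends on the allocator slack on the target system, and on an MMU kernel all of these cases behave differently:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* shrinking in place stays within the kmalloc()'d block, so
		 * this can work; growing beyond kobjsize() gives ENOMEM, and
		 * MREMAP_MAYMOVE/MREMAP_FIXED are not honoured */
		void *q = mremap(p, 3 * 4096, 2 * 4096, 0);
		if (q == MAP_FAILED)
			perror("mremap");

		return 0;
	}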
@@ -1206,3 +1304,44 @@ struct page *filemap_nopage(struct vm_area_struct *area,
 	BUG();
 	return NULL;
 }
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+
+	if (addr + len < addr)
+		return 0;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	down_read(&mm->mmap_sem);
+
+	/* the access must start within one of the target process's mappings */
+	vma = find_vma(mm, addr);
+	if (vma) {
+		/* don't overrun this mapping */
+		if (addr + len >= vma->vm_end)
+			len = vma->vm_end - addr;
+
+		/* only read or write mappings where it is permitted */
+		if (write && vma->vm_flags & VM_MAYWRITE)
+			len -= copy_to_user((void *) addr, buf, len);
+		else if (!write && vma->vm_flags & VM_MAYREAD)
+			len -= copy_from_user(buf, (void *) addr, len);
+		else
+			len = 0;
+	} else {
+		len = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return len;
+}
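
The new access_process_vm() returns the number of bytes actually copied, clamped so the access never runs past the first mapping containing addr, or 0 on any failure. A hedged sketch of how a kernel-side user such as ptrace might call it; 'child' and the peek_word() helper are illustrative, not part of this patch:

	/* read one word from another task's address space, 0 on failure */
	static unsigned long peek_word(struct task_struct *child,
				       unsigned long addr)
	{
		unsigned long word = 0;

		/* write = 0: copy from the target into our kernel buffer */
		if (access_process_vm(child, addr, &word, sizeof(word), 0)
		    != sizeof(word))
			return 0;	/* partial or failed copy */
		return word;
	}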