author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /mm/nommu.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--  mm/nommu.c  259
1 file changed, 178 insertions(+), 81 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 88ff091eb07a..9edc897a3970 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,7 +10,7 @@
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -29,6 +29,7 @@
 #include <linux/personality.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/audit.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
@@ -126,7 +127,8 @@ unsigned int kobjsize(const void *objp)
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, int nr_pages, unsigned int foll_flags,
-                     struct page **pages, struct vm_area_struct **vmas)
+                     struct page **pages, struct vm_area_struct **vmas,
+                     int *retry)
 {
         struct vm_area_struct *vma;
         unsigned long vm_flags;
@@ -184,7 +186,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         if (force)
                 flags |= FOLL_FORCE;
 
-        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+                                NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -293,12 +296,60 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
+/*
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ *
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                         PAGE_KERNEL);
+}
+EXPORT_SYMBOL(vzalloc);
+
+/**
+ * vmalloc_node - allocate memory on a specific node
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
 void *vmalloc_node(unsigned long size, int node)
 {
         return vmalloc(size);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+        return vzalloc(size);
+}
+EXPORT_SYMBOL(vzalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
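vzalloc() and vzalloc_node() are convenience wrappers for the common allocate-then-zero pattern, equivalent to calling __vmalloc() with __GFP_ZERO as the hunk above shows. A minimal usage sketch, not part of this commit; the helper and buffer names are hypothetical:

#include <linux/types.h>
#include <linux/vmalloc.h>

/* Hypothetical driver state: a zeroed, virtually contiguous table. */
static u32 *stats_table;

static int stats_init(unsigned long nentries)
{
        /* One call replaces the vmalloc() + memset(..., 0, ...) pair. */
        stats_table = vzalloc(nentries * sizeof(*stats_table));
        if (!stats_table)
                return -ENOMEM;
        return 0;
}

static void stats_exit(void)
{
        vfree(stats_table);
        stats_table = NULL;
}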
@@ -392,6 +443,31 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
 
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ *
+ * Returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+        BUG();
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+        BUG();
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
+
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                    struct page *page)
 {
@@ -604,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-        struct vm_area_struct *pvma, **pp, *next;
+        struct vm_area_struct *pvma, *prev;
         struct address_space *mapping;
-        struct rb_node **p, *parent;
+        struct rb_node **p, *parent, *rb_prev;
 
         kenter(",%p", vma);
 
@@ -627,7 +703,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
         }
 
         /* add the VMA to the tree */
-        parent = NULL;
+        parent = rb_prev = NULL;
         p = &mm->mm_rb.rb_node;
         while (*p) {
                 parent = *p;
@@ -637,17 +713,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                  * (the latter is necessary as we may get identical VMAs) */
                 if (vma->vm_start < pvma->vm_start)
                         p = &(*p)->rb_left;
-                else if (vma->vm_start > pvma->vm_start)
+                else if (vma->vm_start > pvma->vm_start) {
+                        rb_prev = parent;
                         p = &(*p)->rb_right;
-                else if (vma->vm_end < pvma->vm_end)
+                } else if (vma->vm_end < pvma->vm_end)
                         p = &(*p)->rb_left;
-                else if (vma->vm_end > pvma->vm_end)
+                else if (vma->vm_end > pvma->vm_end) {
+                        rb_prev = parent;
                         p = &(*p)->rb_right;
-                else if (vma < pvma)
+                } else if (vma < pvma)
                         p = &(*p)->rb_left;
-                else if (vma > pvma)
+                else if (vma > pvma) {
+                        rb_prev = parent;
                         p = &(*p)->rb_right;
-                else
+                } else
                         BUG();
         }
 
@@ -655,20 +734,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 
         /* add VMA to the VMA list also */
-        for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
-                if (pvma->vm_start > vma->vm_start)
-                        break;
-                if (pvma->vm_start < vma->vm_start)
-                        continue;
-                if (pvma->vm_end < vma->vm_end)
-                        break;
-        }
+        prev = NULL;
+        if (rb_prev)
+                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-        next = *pp;
-        *pp = vma;
-        vma->vm_next = next;
-        if (next)
-                next->vm_prev = vma;
+        __vma_link_list(mm, vma, prev, parent);
 }
 
 /*
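The point of tracking rb_prev on the way down: every time the new VMA sorts to the right of pvma, pvma precedes it in address order, so the last node passed on a right turn is the in-order predecessor. That lets __vma_link_list() splice the VMA into the mm->mmap list in constant time instead of re-walking the list as the removed loop did. The same predecessor-tracking idea in a self-contained toy form (simplified node type, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node {
        long key;
        struct node *left, *right;
};

/* Insert key into an ordered binary tree and return the in-order
 * predecessor seen during the descent (the last node we moved right
 * past), or NULL if the new key is the smallest. */
static struct node *insert(struct node **root, long key)
{
        struct node **p = root, *parent = NULL, *prev = NULL;

        while (*p) {
                parent = *p;
                if (key < parent->key) {
                        p = &parent->left;
                } else {
                        prev = parent;          /* candidate predecessor */
                        p = &parent->right;
                }
        }
        *p = calloc(1, sizeof(**p));
        if (!*p)
                abort();
        (*p)->key = key;
        return prev;
}

int main(void)
{
        struct node *root = NULL, *prev;
        long keys[] = { 40, 10, 30, 20 };

        for (int i = 0; i < 4; i++) {
                prev = insert(&root, keys[i]);
                printf("inserted %ld, predecessor %ld\n",
                       keys[i], prev ? prev->key : -1);
        }
        return 0;
}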
@@ -676,7 +746,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
  */
 static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
-        struct vm_area_struct **pp;
         struct address_space *mapping;
         struct mm_struct *mm = vma->vm_mm;
 
@@ -699,12 +768,14 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
         /* remove from the MM's tree and list */
         rb_erase(&vma->vm_rb, &mm->mm_rb);
-        for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
-                if (*pp == vma) {
-                        *pp = vma->vm_next;
-                        break;
-                }
-        }
+
+        if (vma->vm_prev)
+                vma->vm_prev->vm_next = vma->vm_next;
+        else
+                mm->mmap = vma->vm_next;
+
+        if (vma->vm_next)
+                vma->vm_next->vm_prev = vma->vm_prev;
 
         vma->vm_mm = NULL;
 }
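Because add_vma_to_mm() now maintains both vm_prev and vm_next, removal becomes a constant-time doubly-linked-list unlink, with mm->mmap adjusted when the first VMA is removed. A self-contained sketch of the same pattern (toy types, not kernel code):

#include <assert.h>
#include <stddef.h>

struct item {
        struct item *prev, *next;
};

struct list {
        struct item *head;
};

/* O(1) unlink, mirroring the vm_prev/vm_next handling above. */
static void unlink_item(struct list *l, struct item *it)
{
        if (it->prev)
                it->prev->next = it->next;
        else
                l->head = it->next;             /* removing the first element */

        if (it->next)
                it->next->prev = it->prev;

        it->prev = it->next = NULL;
}

int main(void)
{
        struct item a = {0}, b = {0}, c = {0};
        struct list l = { .head = &a };

        a.next = &b; b.prev = &a;
        b.next = &c; c.prev = &b;

        unlink_item(&l, &a);                    /* head moves to b */
        assert(l.head == &b && b.prev == NULL);

        unlink_item(&l, &c);                    /* tail removal */
        assert(b.next == NULL);
        return 0;
}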
@@ -733,17 +804,15 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
         struct vm_area_struct *vma;
-        struct rb_node *n = mm->mm_rb.rb_node;
 
         /* check the cache first */
         vma = mm->mmap_cache;
         if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                 return vma;
 
-        /* trawl the tree (there may be multiple mappings in which addr
+        /* trawl the list (there may be multiple mappings in which addr
          * resides) */
-        for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-                vma = rb_entry(n, struct vm_area_struct, vm_rb);
+        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                 if (vma->vm_start > addr)
                         return NULL;
                 if (vma->vm_end > addr) {
@@ -783,7 +852,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                              unsigned long len)
 {
         struct vm_area_struct *vma;
-        struct rb_node *n = mm->mm_rb.rb_node;
         unsigned long end = addr + len;
 
         /* check the cache first */
@@ -791,10 +859,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
         if (vma && vma->vm_start == addr && vma->vm_end == end)
                 return vma;
 
-        /* trawl the tree (there may be multiple mappings in which addr
+        /* trawl the list (there may be multiple mappings in which addr
          * resides) */
-        for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-                vma = rb_entry(n, struct vm_area_struct, vm_rb);
+        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                 if (vma->vm_start < addr)
                         continue;
                 if (vma->vm_start > addr)
@@ -1057,7 +1124,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                            unsigned long capabilities)
 {
         struct page *pages;
-        unsigned long total, point, n, rlen;
+        unsigned long total, point, n;
         void *base;
         int ret, order;
 
@@ -1081,13 +1148,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
          * make a private copy of the data and map that instead */
         }
 
-        rlen = PAGE_ALIGN(len);
 
         /* allocate some memory to hold the mapping
          * - note that this may not return a page-aligned address if the object
          *   we're allocating is smaller than a page
          */
-        order = get_order(rlen);
+        order = get_order(len);
         kdebug("alloc order %d for %lx", order, len);
 
         pages = alloc_pages(GFP_KERNEL, order);
@@ -1097,7 +1163,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
         total = 1 << order;
         atomic_long_add(total, &mmap_pages_allocated);
 
-        point = rlen >> PAGE_SHIFT;
+        point = len >> PAGE_SHIFT;
 
         /* we allocated a power-of-2 sized page set, so we may want to trim off
          * the excess */
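With do_mmap_pgoff() now page-aligning len before this function is reached (see the @@ -1192,6 +1258,7 @@ hunk below), the separate rlen copy is redundant and len can be used directly. The allocation still grabs a power-of-two block of pages and trims it back to len >> PAGE_SHIFT pages. A standalone sketch of that arithmetic, where the PAGE_SIZE value and the get_order() stand-in are simplifications for illustration only:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in for the kernel's get_order(): the smallest order
 * such that (1 << order) pages cover len bytes. */
static unsigned int order_for(unsigned long len)
{
        unsigned long pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long len = 3 * PAGE_SIZE;          /* already page-aligned */
        unsigned int order = order_for(len);
        unsigned long total = 1UL << order;         /* pages actually allocated */
        unsigned long point = len >> PAGE_SHIFT;    /* pages actually needed */

        /* 3 pages requested -> order 2 -> 4 pages allocated, 1 trimmed off. */
        printf("order %u, total %lu, excess %lu\n", order, total, total - point);
        return 0;
}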
@@ -1119,7 +1185,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
         base = page_address(pages);
         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
         region->vm_start = (unsigned long) base;
-        region->vm_end = region->vm_start + rlen;
+        region->vm_end = region->vm_start + len;
         region->vm_top = region->vm_start + (total << PAGE_SHIFT);
 
         vma->vm_start = region->vm_start;
@@ -1135,22 +1201,22 @@ static int do_mmap_private(struct vm_area_struct *vma,
 
                 old_fs = get_fs();
                 set_fs(KERNEL_DS);
-                ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
+                ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
                 set_fs(old_fs);
 
                 if (ret < 0)
                         goto error_free;
 
                 /* clear the last little bit */
-                if (ret < rlen)
-                        memset(base + ret, 0, rlen - ret);
+                if (ret < len)
+                        memset(base + ret, 0, len - ret);
 
         }
 
         return 0;
 
 error_free:
-        free_page_series(region->vm_start, region->vm_end);
+        free_page_series(region->vm_start, region->vm_top);
         region->vm_start = vma->vm_start = 0;
         region->vm_end = vma->vm_end = 0;
         region->vm_top = 0;
@@ -1159,7 +1225,7 @@ error_free:
 enomem:
         printk("Allocation of length %lu from process %d (%s) failed\n",
                len, current->pid, current->comm);
-        show_free_areas();
+        show_free_areas(0);
         return -ENOMEM;
 }
 
@@ -1192,6 +1258,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 
         /* we ignore the address hint */
         addr = 0;
+        len = PAGE_ALIGN(len);
 
         /* we've determined that we can make the mapping, now translate what we
          * now know into VMA flags */
@@ -1309,15 +1376,15 @@ unsigned long do_mmap_pgoff(struct file *file,
         if (capabilities & BDI_CAP_MAP_DIRECT) {
                 addr = file->f_op->get_unmapped_area(file, addr, len,
                                                      pgoff, flags);
-                if (IS_ERR((void *) addr)) {
+                if (IS_ERR_VALUE(addr)) {
                         ret = addr;
-                        if (ret != (unsigned long) -ENOSYS)
+                        if (ret != -ENOSYS)
                                 goto error_just_free;
 
                         /* the driver refused to tell us where to site
                          * the mapping so we'll have to attempt to copy
                          * it */
-                        ret = (unsigned long) -ENODEV;
+                        ret = -ENODEV;
                         if (!(capabilities & BDI_CAP_MAP_COPY))
                                 goto error_just_free;
 
@@ -1392,14 +1459,14 @@ error_getting_vma:
         printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
                " from process %d failed\n",
                len, current->pid);
-        show_free_areas();
+        show_free_areas(0);
         return -ENOMEM;
 
 error_getting_region:
         printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
                " from process %d failed\n",
                len, current->pid);
-        show_free_areas();
+        show_free_areas(0);
         return -ENOMEM;
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
@@ -1411,6 +1478,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
         struct file *file = NULL;
         unsigned long retval = -EBADF;
 
+        audit_mmap_fd(fd, flags);
         if (!(flags & MAP_ANONYMOUS)) {
                 file = fget(fd);
                 if (!file)
@@ -1567,15 +1635,17 @@ static int shrink_vma(struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
         struct vm_area_struct *vma;
-        struct rb_node *rb;
-        unsigned long end = start + len;
+        unsigned long end;
         int ret;
 
         kenter(",%lx,%zx", start, len);
 
+        len = PAGE_ALIGN(len);
         if (len == 0)
                 return -EINVAL;
 
+        end = start + len;
+
         /* find the first potentially overlapping VMA */
         vma = find_vma(mm, start);
         if (!vma) {
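do_munmap() (like do_mremap() further down) now rounds len up to a whole number of pages before the zero-length check and before computing end, so both operate on the aligned value. PAGE_ALIGN rounds up to the next page boundary; a quick standalone illustration, with the macro reproduced here in its conventional round-up form purely for the example:

#include <assert.h>

#define PAGE_SIZE  4096UL
/* Conventional round-up-to-page-boundary definition, for illustration. */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        /* A zero-length request stays zero and is still rejected as -EINVAL. */
        assert(PAGE_ALIGN(0UL) == 0);

        /* A 1-byte unmap now covers the whole containing page... */
        assert(PAGE_ALIGN(1UL) == PAGE_SIZE);

        /* ...and an already-aligned length is left untouched. */
        assert(PAGE_ALIGN(3 * PAGE_SIZE) == 3 * PAGE_SIZE);
        return 0;
}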
@@ -1600,9 +1670,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                         }
                         if (end == vma->vm_end)
                                 goto erase_whole_vma;
-                        rb = rb_next(&vma->vm_rb);
-                        vma = rb_entry(rb, struct vm_area_struct, vm_rb);
-                } while (rb);
+                        vma = vma->vm_next;
+                } while (vma);
                 kleave(" = -EINVAL [split file]");
                 return -EINVAL;
         } else {
@@ -1668,6 +1737,7 @@ void exit_mmap(struct mm_struct *mm)
                 mm->mmap = vma->vm_next;
                 delete_vma_from_mm(vma);
                 delete_vma(mm, vma);
+                cond_resched();
         }
 
         kleave("");
@@ -1695,6 +1765,8 @@ unsigned long do_mremap(unsigned long addr,
         struct vm_area_struct *vma;
 
         /* insanity checks first */
+        old_len = PAGE_ALIGN(old_len);
+        new_len = PAGE_ALIGN(new_len);
         if (old_len == 0 || new_len == 0)
                 return (unsigned long) -EINVAL;
 
@@ -1741,10 +1813,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
         return NULL;
 }
 
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                    unsigned long to, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+                    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-        vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+        if (addr != (pfn << PAGE_SHIFT))
+                return -EINVAL;
+
+        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
         return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -1764,10 +1839,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
-void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-
 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
                                      unsigned long len, unsigned long pgoff, unsigned long flags)
 {
@@ -1885,7 +1956,7 @@ error:
         return -ENOMEM;
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
         return 0;
 }
@@ -1897,21 +1968,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
-/*
- * Access another process' address space.
- * - source/target buffer must be kernel space
- */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+                              unsigned long addr, void *buf, int len, int write)
 {
         struct vm_area_struct *vma;
-        struct mm_struct *mm;
-
-        if (addr + len < addr)
-                return 0;
-
-        mm = get_task_mm(tsk);
-        if (!mm)
-                return 0;
 
         down_read(&mm->mmap_sem);
 
@@ -1936,6 +1996,43 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
         }
 
         up_read(&mm->mmap_sem);
+
+        return len;
+}
+
+/**
+ * @access_remote_vm - access another process' address space
+ * @mm: the mm_struct of the target address space
+ * @addr: start address to access
+ * @buf: source or destination buffer
+ * @len: number of bytes to transfer
+ * @write: whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+                     void *buf, int len, int write)
+{
+        return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+        struct mm_struct *mm;
+
+        if (addr + len < addr)
+                return 0;
+
+        mm = get_task_mm(tsk);
+        if (!mm)
+                return 0;
+
+        len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+
         mmput(mm);
         return len;
 }
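The split gives callers that already hold an mm reference a direct entry point, access_remote_vm(), while access_process_vm() keeps its old behaviour of looking the mm up from a task and dropping it afterwards. A minimal caller sketch, with a hypothetical helper name, mirroring the get_task_mm()/mmput() pairing shown above:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: read a few bytes from another task's address space. */
static int peek_remote(struct task_struct *task, unsigned long addr,
                       void *buf, int len)
{
        struct mm_struct *mm;
        int copied;

        mm = get_task_mm(task);         /* take the reference the API requires */
        if (!mm)
                return 0;

        copied = access_remote_vm(mm, addr, buf, len, 0 /* read */);

        mmput(mm);                      /* drop the reference */
        return copied;
}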