Diffstat (limited to 'mm/nommu.c')

 mm/nommu.c | 46 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 66e81e7e9fe9..8d484241d034 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -33,6 +33,7 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include "internal.h"
 
 static inline __attribute__((format(printf, 1, 2)))
@@ -56,12 +57,11 @@ void no_printk(const char *fmt, ...)
 	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
 #endif
 
-#include "internal.h"
-
 void *high_memory;
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
+unsigned long highest_memmap_pfn;
 struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
@@ -170,21 +170,20 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, unsigned int foll_flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
 	int i;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
-	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
 	/* calculate required read or write permissions.
-	 * - if 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 	 */
-	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
 	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
@@ -192,8 +191,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			goto finish_or_fault;
 
 		/* protect what we can, including chardevs */
-		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
-		    (!ignore && !(vm_flags & vma->vm_flags)))
+		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    !(vm_flags & vma->vm_flags))
 			goto finish_or_fault;
 
 		if (pages) {
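
Note: the rewritten permission calculation above folds the old write/force/ignore booleans straight into tests on foll_flags. A minimal standalone sketch of the same mask arithmetic (local copies of the flag values, which match the contemporary kernel definitions but are assumptions outside this patch) shows why a forced write only requires VM_MAYWRITE while a plain write requires VM_WRITE:

    /* Sketch: the vm_flags calculation from the hunk above, compiled
     * standalone.  Flag values are local copies, not kernel headers. */
    #include <stdio.h>

    #define VM_READ     0x0001UL
    #define VM_WRITE    0x0002UL
    #define VM_MAYREAD  0x0010UL
    #define VM_MAYWRITE 0x0020UL
    #define FOLL_WRITE  0x01
    #define FOLL_FORCE  0x10

    static unsigned long required_vm_flags(unsigned int foll_flags)
    {
        unsigned long vm_flags;

        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        return vm_flags;
    }

    int main(void)
    {
        /* forced write: only VM_MAYWRITE required (ptrace-style access) */
        printf("%#lx\n", required_vm_flags(FOLL_WRITE | FOLL_FORCE));
        /* plain write: the VMA must actually be VM_WRITE */
        printf("%#lx\n", required_vm_flags(FOLL_WRITE));
        return 0;
    }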
@@ -212,7 +211,6 @@ finish_or_fault:
 	return i ? : -EFAULT;
 }
 
-
 /*
  * get a list of pages in an address range belonging to the specified process
  * and indicate the VMA that covers each page
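
Note: "return i ? : -EFAULT;" uses the GNU binary ?: extension, which evaluates its left operand once and yields it when nonzero, else the right operand. So a fault mid-walk reports the pages already mapped, and only a fault on the very first page returns -EFAULT. A tiny userspace sketch (gcc/clang; finish_or_fault() is a hypothetical helper name):

    #include <assert.h>
    #include <errno.h>

    static int finish_or_fault(int i)
    {
        return i ? : -EFAULT;       /* GNU a ?: b extension */
    }

    int main(void)
    {
        assert(finish_or_fault(3) == 3);        /* 3 pages mapped, then fault */
        assert(finish_or_fault(0) == -EFAULT);  /* faulted immediately */
        return 0;
    }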
@@ -227,9 +225,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int flags = 0;
 
 	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
 	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
 	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
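
Note: with GUP_FLAGS_* gone, get_user_pages() is now a thin wrapper that translates its write/force ints into the same FOLL_* bits the MMU build uses, so callers are unchanged by this patch. A hypothetical caller sketch (not from this patch; era-appropriate mmap_sem locking, error handling elided):

    /* Pin one page of the current task for writing.  The FOLL_*
     * translation happens inside get_user_pages(). */
    static struct page *pin_one_page(unsigned long addr)
    {
        struct page *page;
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, addr, 1,
                             1 /* write */, 0 /* force */, &page, NULL);
        up_read(&current->mm->mmap_sem);

        return ret == 1 ? page : NULL;
    }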
@@ -627,6 +625,22 @@ static void put_nommu_region(struct vm_region *region)
 }
 
 /*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+	struct mm_struct *mm = vma->vm_mm;
+	long start = vma->vm_start & PAGE_MASK;
+	while (start < vma->vm_end) {
+		protect_page(mm, start, flags);
+		start += PAGE_SIZE;
+	}
+	update_protections(mm);
+#endif
+}
+
+/*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
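
Note: protect_vma() programs the memory protection unit one page at a time and then commits the new table once via update_protections(); on kernels without CONFIG_MPU it compiles to an empty function. On CONFIG_MPU kernels, protect_page() and update_protections() are provided by <asm/mmu_context.h>, which the first hunk includes. A userspace sketch of the walk with stubbed helpers (protect_range and the printfs are illustrative, not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static void protect_page(void *mm, unsigned long addr, unsigned long flags)
    {
        printf("protect_page(%#lx, flags=%#lx)\n", addr, flags);
    }

    static void update_protections(void *mm)
    {
        printf("update_protections()\n");   /* commit the table to the MPU */
    }

    static void protect_range(void *mm, unsigned long vm_start,
                              unsigned long vm_end, unsigned long flags)
    {
        unsigned long start = vm_start & PAGE_MASK;

        while (start < vm_end) {            /* one call per page */
            protect_page(mm, start, flags);
            start += PAGE_SIZE;
        }
        update_protections(mm);
    }

    int main(void)
    {
        /* a three-page VMA: protect_page() runs 3 times, commit once */
        protect_range(NULL, 0x1000, 0x4000, 0x3);
        return 0;
    }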
@@ -645,6 +659,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 	vma->vm_mm = mm;
 
+	protect_vma(vma, vma->vm_flags);
+
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -707,6 +723,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	kenter("%p", vma);
 
+	protect_vma(vma, 0);
+
 	mm->map_count--;
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = NULL;