author      Bernd Schmidt <bernds_cb1@t-online.de>            2009-09-21 20:03:57 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2009-09-22 10:17:43 -0400
commit      eb8cdec4a984fde123a91250dcc9e0bddf5eafdc (patch)
tree        9f97b5949e6e63ae947363149b62ed224dad5ab9 /mm
parent      02e87d1a934c70e3599eb7a29db783806d329e17 (diff)
nommu: add support for Memory Protection Units (MPU)
Some architectures (like the Blackfin arch) implement some of the
"simpler" features that one would expect out of an MMU, such as memory
protection.
In our case, we actually get read/write/exec protection down to the page
boundary, so processes can't stomp on each other, let alone the kernel.
There is, however, a performance decrease (which depends greatly on the
workload), as the hardware/software interaction was not optimized at
design time.
Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--    mm/nommu.c    21
1 file changed, 21 insertions, 0 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index 2d02ca17ce18..1a4473faac48 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -33,6 +33,7 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include "internal.h"
 
 static inline __attribute__((format(printf, 1, 2)))
@@ -623,6 +624,22 @@ static void put_nommu_region(struct vm_region *region)
 }
 
 /*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+	struct mm_struct *mm = vma->vm_mm;
+	long start = vma->vm_start & PAGE_MASK;
+	while (start < vma->vm_end) {
+		protect_page(mm, start, flags);
+		start += PAGE_SIZE;
+	}
+	update_protections(mm);
+#endif
+}
+
+/*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
@@ -641,6 +658,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 	vma->vm_mm = mm;
 
+	protect_vma(vma, vma->vm_flags);
+
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -703,6 +722,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	kenter("%p", vma);
 
+	protect_vma(vma, 0);
+
 	mm->map_count--;
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = NULL;
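
For context (not part of the patch itself): protect_vma() depends on two hooks that an MPU-capable architecture is expected to provide once CONFIG_MPU is set, protect_page() and update_protections(), which become reachable here through the newly added <asm/mmu_context.h> include. The fragment below is only a rough sketch of what such arch hooks could look like for a Blackfin-style MPU; the per-mm page_rwx_mask field and the mpu_load_protection() helper are hypothetical names used for illustration, not the actual Blackfin implementation.

/*
 * Illustrative sketch only, assuming a hypothetical per-mm table of
 * per-page protection bits in mm->context.page_rwx_mask and a
 * hypothetical mpu_load_protection() that programs the MPU hardware.
 */
#ifdef CONFIG_MPU
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;	/* hypothetical */
	unsigned long page = addr >> PAGE_SHIFT;

	/*
	 * Remember the requested read/write/exec bits for this page;
	 * flags == 0 (as passed by delete_vma_from_mm) revokes access.
	 */
	mask[page] = flags & (VM_READ | VM_WRITE | VM_EXEC);
}

static inline void update_protections(struct mm_struct *mm)
{
	/* only reprogram the hardware if this mm is currently running */
	if (mm == current->mm)
		mpu_load_protection(mm->context.page_rwx_mask);	/* hypothetical */
}
#endif

With hooks of this shape, the protect_vma() loop added above simply walks the VMA a page at a time recording the new permission bits, and the single update_protections() call at the end pushes the accumulated state to the MPU once per VMA rather than once per page.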