author     David Howells <dhowells@redhat.com>    2006-09-27 04:50:18 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-27 11:26:14 -0400
commit     7b4d5b8b39fd3701ed3693a89f2bd8f6ef49bce2
tree       b1741de6753ec41a45a7b69276eeccb1bcb3e46d /mm
parent     910e46da4b4e93d56ffea318c64afa41868d5e6d
[PATCH] NOMMU: Check VMA protections
Check the VMA protections in get_user_pages() against what's being asked.
This ensures that we don't accidentally write to a non-writable VMA or
permit access to an I/O-mapping VMA (which may lack page structs).
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
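The heart of the patch is the permission mask computed before walking the address range: the requested access maps to a real flag plus its "MAY" counterpart, and 'force' then relaxes the requirement to the "MAY" bits alone. The stand-alone C sketch below mirrors that arithmetic and the VMA check outside the kernel; the flag values and the check_access() helper are made up purely for illustration and are not the definitions from <linux/mm.h>.

#include <stdio.h>

/* Stand-in VMA flag bits; values chosen for illustration only,
 * not the kernel's actual VM_* definitions. */
#define VM_READ     0x0001
#define VM_WRITE    0x0002
#define VM_MAYREAD  0x0010
#define VM_MAYWRITE 0x0020
#define VM_IO       0x0100
#define VM_PFNMAP   0x0200

/* Hypothetical helper mirroring the patch's logic: compute the flags a VMA
 * must carry for the requested access, then test a candidate VMA against it. */
static int check_access(unsigned long vma_flags, int write, int force)
{
	unsigned long vm_flags;

	/* want the real permission plus the corresponding "MAY" bit ... */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	/* ... but with 'force', only the "MAY" bit is required */
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	/* refuse I/O-style mappings and VMAs lacking the required bits */
	if (vma_flags & (VM_IO | VM_PFNMAP) ||
	    !(vm_flags & vma_flags))
		return 0;
	return 1;
}

int main(void)
{
	/* a read-only mapping: readable, writable only via 'force' */
	unsigned long ro = VM_READ | VM_MAYREAD | VM_MAYWRITE;

	printf("read:         %d\n", check_access(ro, 0, 0));                          /* 1 */
	printf("write:        %d\n", check_access(ro, 1, 0));                          /* 0 */
	printf("forced write: %d\n", check_access(ro, 1, 1));                          /* 1 */
	printf("I/O mapping:  %d\n", check_access(VM_IO | VM_READ | VM_MAYREAD, 0, 0)); /* 0 */
	return 0;
}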
Diffstat (limited to 'mm')
-rw-r--r--  mm/nommu.c  30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 2af50831183f..2e140a6ae22e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -122,19 +122,35 @@ unsigned int kobjsize(const void *objp)
 }
 
 /*
- * The nommu dodgy version :-)
+ * get a list of pages in an address range belonging to the specified process
+ * and indicate the VMA that covers each page
+ * - this is potentially dodgy as we may end incrementing the page count of a
+ *   slab page or a secondary page from a compound page
+ * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	unsigned long start, int len, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
-	int i;
 	struct vm_area_struct *vma;
+	unsigned long vm_flags;
+	int i;
+
+	/* calculate required read or write permissions.
+	 * - if 'force' is set, we only require the "MAY" flags.
+	 */
+	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
 	for (i = 0; i < len; i++) {
 		vma = find_vma(mm, start);
-		if(!vma)
-			return i ? : -EFAULT;
+		if (!vma)
+			goto finish_or_fault;
+
+		/* protect what we can, including chardevs */
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
+		    !(vm_flags & vma->vm_flags))
+			goto finish_or_fault;
 
 		if (pages) {
 			pages[i] = virt_to_page(start);
@@ -145,7 +161,11 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			vmas[i] = vma;
 		start += PAGE_SIZE;
 	}
-	return(i);
+
+	return i;
+
+finish_or_fault:
+	return i ? : -EFAULT;
 }
 
 EXPORT_SYMBOL(get_user_pages);
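One small detail in the new finish_or_fault exit: 'return i ? : -EFAULT;' uses the GNU C conditional-with-omitted-operand extension ('x ? : y' means 'x ? x : y' with x evaluated once), so the function reports how many pages it had already handled before hitting the bad VMA and only returns -EFAULT if it faulted on the very first page. A minimal user-space sketch of that convention follows; the finish_or_fault() wrapper here is hypothetical, written only to show the expression's behaviour.

#include <errno.h>
#include <stdio.h>

/* Hypothetical wrapper showing the kernel's finish_or_fault convention:
 * report partial progress if any pages were processed, otherwise report
 * the fault. "i ? : -EFAULT" is a GNU C extension for "i ? i : -EFAULT". */
static int finish_or_fault(int i)
{
	return i ? : -EFAULT;
}

int main(void)
{
	printf("%d\n", finish_or_fault(0));  /* -EFAULT: faulted on the first page */
	printf("%d\n", finish_or_fault(3));  /* 3: pages handled before the fault */
	return 0;
}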