author     Hugh Dickins <hugh@veritas.com>          2005-09-14 01:13:02 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-14 14:18:13 -0400
commit     2fd4ef85e0db9ed75c98e13953257a967ea55e03 (patch)
tree       119dfe9f88a832f3db6ff66e631112626f268f18 /arch/ppc64
parent     fb085cf1d4294824571815d487daccc0609543f0 (diff)

[PATCH] error path in setup_arg_pages() misses vm_unacct_memory()

Pavel Emelianov and Kirill Korotaev observe that fs and arch users of
security_vm_enough_memory tend to forget to vm_unacct_memory when a
failure occurs further down (typically in setup_arg_pages variants).

These are all users of insert_vm_struct, and that reservation will only
be unaccounted on exit if the vma is marked VM_ACCOUNT: which in some
cases it is (hidden inside VM_STACK_FLAGS) and in some cases it isn't.
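
For illustration only (this is not code from the patch: map_special_page()
and later_setup_step_fails() are hypothetical stand-ins for the arch code,
while security_vm_enough_memory(), vm_unacct_memory() and insert_vm_struct()
are the real interfaces), the leaky pattern looks roughly like this:

static int map_special_page(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long npages)
{
	if (security_vm_enough_memory(npages))	/* charge npages to Committed_AS */
		return -ENOMEM;

	if (later_setup_step_fails())		/* hypothetical failure point */
		return -ENOMEM;			/* bug: no vm_unacct_memory(npages),
						 * so the charge is never dropped */

	insert_vm_struct(mm, vma);		/* vma has no VM_ACCOUNT, so process
						 * exit never unaccounts it either */
	return 0;
}
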
So x86_64 32-bit and ppc64 vDSO ELFs have been leaking memory into
Committed_AS each time they're run. But don't add VM_ACCOUNT to them,
it's inappropriate to reserve against the very unlikely case that gdb
be used to COW a vDSO page - we ought to do something about that in
do_wp_page, but there are yet other inconsistencies to be resolved.

The safe and economical way to fix this is to let insert_vm_struct do
the security_vm_enough_memory check when it finds VM_ACCOUNT is set.
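
The mm/mmap.c side of that change is outside this arch/ppc64 diffstat; as a
rough sketch only (not the literal hunk, details may differ), the function
might now look something like this, doing the accounting check itself when
VM_ACCOUNT is set and failing the insert with -ENOMEM when the charge cannot
be made:

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *__vma, *prev;
	struct rb_node **rb_link, *rb_parent;

	__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
	if (__vma && __vma->vm_start < vma->vm_end)
		return -ENOMEM;			/* range overlaps an existing vma */
	if ((vma->vm_flags & VM_ACCOUNT) &&
	    security_vm_enough_memory((vma->vm_end - vma->vm_start) >> PAGE_SHIFT))
		return -ENOMEM;			/* the accounting check, done once, here */
	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}
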
And the MIPS irix_brk has been calling security_vm_enough_memory before
calling do_brk which repeats it, doubly accounting and so also leaking.
Remove that, and all the fs and arch calls to security_vm_enough_memory:
give it a less misleading name later on.
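
The MIPS side is likewise outside this diffstat; the double accounting it
describes looks roughly like this (a sketch only, variable names invented,
not the literal irix_brk() source):

	/* in irix_brk(), before this patch: */
	if (security_vm_enough_memory((newbrk - oldbrk) >> PAGE_SHIFT))
		return -ENOMEM;			/* first charge */
	do_brk(oldbrk, newbrk - oldbrk);	/* do_brk() charges the same pages
						 * again and marks its vma VM_ACCOUNT,
						 * so only that second charge is ever
						 * undone -- the first one leaks */
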
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-Off-By: Kirill Korotaev <dev@sw.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/vdso.c  15
1 files changed, 9 insertions, 6 deletions
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
index 4777676365fe..efa985f05aca 100644
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -224,10 +224,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (vma == NULL)
 		return -ENOMEM;
-	if (security_vm_enough_memory(vdso_pages)) {
-		kmem_cache_free(vm_area_cachep, vma);
-		return -ENOMEM;
-	}
+
 	memset(vma, 0, sizeof(*vma));
 
 	/*
@@ -237,8 +234,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	 */
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_pages << PAGE_SHIFT, 0, 0);
-	if (vdso_base & ~PAGE_MASK)
+	if (vdso_base & ~PAGE_MASK) {
+		kmem_cache_free(vm_area_cachep, vma);
 		return (int)vdso_base;
+	}
 
 	current->thread.vdso_base = vdso_base;
 
@@ -266,7 +265,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	vma->vm_ops = &vdso_vmops;
 
 	down_write(&mm->mmap_sem);
-	insert_vm_struct(mm, vma);
+	if (insert_vm_struct(mm, vma)) {
+		up_write(&mm->mmap_sem);
+		kmem_cache_free(vm_area_cachep, vma);
+		return -ENOMEM;
+	}
 	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	up_write(&mm->mmap_sem);
 