author		Ollie Wild <aaw@google.com>	2007-07-19 04:48:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:45 -0400
commit		b6a2fea39318e43fee84fa7b0b90d68bed92d2ba (patch)
tree		c9c3619cb2730b5c10c7427b837146bce3d69156 /fs/exec.c
parent		bdf4c48af20a3b0f01671799ace345e3d49576da (diff)
mm: variable length argument support
Remove the arg+env limit of MAX_ARG_PAGES by copying the strings directly
from the old mm into the new mm.  We create the new mm before the binfmt
code runs, and place the new stack at the very top of the address space.
Once the binfmt code runs and figures out where the stack should be, we
move it downwards.

It is a bit peculiar in that we have one task with two mm's, one of which
is inactive.

[a.p.zijlstra@chello.nl: limit stack size]
Signed-off-by: Ollie Wild <aaw@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <linux-arch@vger.kernel.org>
Cc: Hugh Dickins <hugh@veritas.com>
[bunk@stusta.de: unexport bprm_mm_init]
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/exec.c')
-rw-r--r--	fs/exec.c | 614
1 file changed, 396 insertions(+), 218 deletions(-)
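
In practice, the ceiling on the combined argv+env size moves from the fixed MAX_ARG_PAGES window (32 pages on most architectures, so 128 KiB with 4 KiB pages) to a quarter of the soft RLIMIT_STACK, enforced by the get_arg_page() check added below. A minimal userspace sketch of the new ceiling, assuming only getrlimit(2) and sysconf(3); the output format is illustrative, not part of the patch:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rlim;

	if (getrlimit(RLIMIT_STACK, &rlim) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (rlim.rlim_cur == RLIM_INFINITY) {
		/* rlim_cur / 4 is then effectively unbounded */
		puts("stack unlimited; argv+env effectively unbounded");
		return 0;
	}
	/* Mirrors the kernel check: size > rlim_cur / 4 ends in E2BIG. */
	printf("soft stack limit:     %lu bytes\n",
	       (unsigned long)rlim.rlim_cur);
	printf("new argv+env ceiling: %lu bytes\n",
	       (unsigned long)rlim.rlim_cur / 4);
	printf("old fixed limit:      %ld bytes (32 pages)\n",
	       32 * sysconf(_SC_PAGESIZE));
	return 0;
}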
diff --git a/fs/exec.c b/fs/exec.c
index 2e3f7950c185..498f2b3dca20 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -54,6 +54,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
+#include <asm/tlb.h>
 
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
@@ -178,6 +179,207 @@ exit:
 	goto out;
 }
 
+#ifdef CONFIG_MMU
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		int write)
+{
+	struct page *page;
+	int ret;
+
+#ifdef CONFIG_STACK_GROWSUP
+	if (write) {
+		ret = expand_stack_downwards(bprm->vma, pos);
+		if (ret < 0)
+			return NULL;
+	}
+#endif
+	ret = get_user_pages(current, bprm->mm, pos,
+			1, write, 1, &page, NULL);
+	if (ret <= 0)
+		return NULL;
+
+	if (write) {
+		struct rlimit *rlim = current->signal->rlim;
+		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+
+		/*
+		 * Limit to 1/4-th the stack size for the argv+env strings.
+		 * This ensures that:
+		 *  - the remaining binfmt code will not run out of stack space,
+		 *  - the program will have a reasonable amount of stack left
+		 *    to work from.
+		 */
+		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
+			put_page(page);
+			return NULL;
+		}
+	}
+
+	return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+	put_page(page);
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		struct page *page)
+{
+	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+	int err = -ENOMEM;
+	struct vm_area_struct *vma = NULL;
+	struct mm_struct *mm = bprm->mm;
+
+	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (!vma)
+		goto err;
+
+	down_write(&mm->mmap_sem);
+	vma->vm_mm = mm;
+
+	/*
+	 * Place the stack at the largest stack address the architecture
+	 * supports. Later, we'll move this to an appropriate place. We don't
+	 * use STACK_TOP because that can depend on attributes which aren't
+	 * configured yet.
+	 */
+	vma->vm_end = STACK_TOP_MAX;
+	vma->vm_start = vma->vm_end - PAGE_SIZE;
+
+	vma->vm_flags = VM_STACK_FLAGS;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+	err = insert_vm_struct(mm, vma);
+	if (err) {
+		up_write(&mm->mmap_sem);
+		goto err;
+	}
+
+	mm->stack_vm = mm->total_vm = 1;
+	up_write(&mm->mmap_sem);
+
+	bprm->p = vma->vm_end - sizeof(void *);
+
+	return 0;
+
+err:
+	if (vma) {
+		bprm->vma = NULL;
+		kmem_cache_free(vm_area_cachep, vma);
+	}
+
+	return err;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+	return len <= MAX_ARG_STRLEN;
+}
+
+#else
+
+static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		int write)
+{
+	struct page *page;
+
+	page = bprm->page[pos / PAGE_SIZE];
+	if (!page && write) {
+		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
+		if (!page)
+			return NULL;
+		bprm->page[pos / PAGE_SIZE] = page;
+	}
+
+	return page;
+}
+
+static void put_arg_page(struct page *page)
+{
+}
+
+static void free_arg_page(struct linux_binprm *bprm, int i)
+{
+	if (bprm->page[i]) {
+		__free_page(bprm->page[i]);
+		bprm->page[i] = NULL;
+	}
+}
+
+static void free_arg_pages(struct linux_binprm *bprm)
+{
+	int i;
+
+	for (i = 0; i < MAX_ARG_PAGES; i++)
+		free_arg_page(bprm, i);
+}
+
+static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
+		struct page *page)
+{
+}
+
+static int __bprm_mm_init(struct linux_binprm *bprm)
+{
+	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
+	return 0;
+}
+
+static bool valid_arg_len(struct linux_binprm *bprm, long len)
+{
+	return len <= bprm->p;
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Create a new mm_struct and populate it with a temporary stack
+ * vm_area_struct.  We don't have enough context at this point to set the stack
+ * flags, permissions, and offset, so we use temporary values.  We'll update
+ * them later in setup_arg_pages().
+ */
+int bprm_mm_init(struct linux_binprm *bprm)
+{
+	int err;
+	struct mm_struct *mm = NULL;
+
+	bprm->mm = mm = mm_alloc();
+	err = -ENOMEM;
+	if (!mm)
+		goto err;
+
+	err = init_new_context(current, mm);
+	if (err)
+		goto err;
+
+	err = __bprm_mm_init(bprm);
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	if (mm) {
+		bprm->mm = NULL;
+		mmdrop(mm);
+	}
+
+	return err;
+}
+
 /*
  * count() counts the number of strings in array ARGV.
  */
@@ -203,15 +405,16 @@ static int count(char __user * __user * argv, int max)
 }
 
 /*
- * 'copy_strings()' copies argument/environment strings from user
- * memory to free pages in kernel mem. These are in a format ready
- * to be put directly into the top of new user memory.
+ * 'copy_strings()' copies argument/environment strings from the old
+ * processes's memory to the new process's stack.  The call to get_user_pages()
+ * ensures the destination page is created and not swapped out.
  */
 static int copy_strings(int argc, char __user * __user * argv,
 			struct linux_binprm *bprm)
 {
 	struct page *kmapped_page = NULL;
 	char *kaddr = NULL;
+	unsigned long kpos = 0;
 	int ret;
 
 	while (argc-- > 0) {
@@ -220,69 +423,69 @@ static int copy_strings(int argc, char __user * __user * argv,
 		unsigned long pos;
 
 		if (get_user(str, argv+argc) ||
-		    !(len = strnlen_user(str, bprm->p))) {
+				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		if (bprm->p < len) {
+		if (!valid_arg_len(bprm, len)) {
 			ret = -E2BIG;
 			goto out;
 		}
 
-		bprm->p -= len;
-		/* XXX: add architecture specific overflow check here. */
+		/* We're going to work our way backwords. */
 		pos = bprm->p;
+		str += len;
+		bprm->p -= len;
 
 		while (len > 0) {
-			int i, new, err;
 			int offset, bytes_to_copy;
-			struct page *page;
 
 			offset = pos % PAGE_SIZE;
-			i = pos/PAGE_SIZE;
-			page = bprm->page[i];
-			new = 0;
-			if (!page) {
-				page = alloc_page(GFP_HIGHUSER);
-				bprm->page[i] = page;
+			if (offset == 0)
+				offset = PAGE_SIZE;
+
+			bytes_to_copy = offset;
+			if (bytes_to_copy > len)
+				bytes_to_copy = len;
+
+			offset -= bytes_to_copy;
+			pos -= bytes_to_copy;
+			str -= bytes_to_copy;
+			len -= bytes_to_copy;
+
+			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
+				struct page *page;
+
+				page = get_arg_page(bprm, pos, 1);
 				if (!page) {
-					ret = -ENOMEM;
+					ret = -E2BIG;
 					goto out;
 				}
-				new = 1;
-			}
 
-			if (page != kmapped_page) {
-				if (kmapped_page)
+				if (kmapped_page) {
+					flush_kernel_dcache_page(kmapped_page);
 					kunmap(kmapped_page);
+					put_arg_page(kmapped_page);
+				}
 				kmapped_page = page;
 				kaddr = kmap(kmapped_page);
+				kpos = pos & PAGE_MASK;
+				flush_arg_page(bprm, kpos, kmapped_page);
 			}
-			if (new && offset)
-				memset(kaddr, 0, offset);
-			bytes_to_copy = PAGE_SIZE - offset;
-			if (bytes_to_copy > len) {
-				bytes_to_copy = len;
-				if (new)
-					memset(kaddr+offset+len, 0,
-						PAGE_SIZE-offset-len);
-			}
-			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
-			if (err) {
+			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 				ret = -EFAULT;
 				goto out;
 			}
-
-			pos += bytes_to_copy;
-			str += bytes_to_copy;
-			len -= bytes_to_copy;
 		}
 	}
 	ret = 0;
 out:
-	if (kmapped_page)
+	if (kmapped_page) {
+		flush_kernel_dcache_page(kmapped_page);
 		kunmap(kmapped_page);
+		put_arg_page(kmapped_page);
+	}
 	return ret;
 }
 
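The rewritten copy_strings() above walks each string backwards from its end position in chunks that never cross a page boundary, so every chunk lands wholly inside one destination page and a page is mapped at most once per boundary crossing. A standalone sketch of that chunking arithmetic, assuming a 4 KiB page size and a plain char array standing in for the kmap()'d destination pages:

#include <stdio.h>
#include <string.h>

#define PG 4096UL	/* stand-in for PAGE_SIZE */

static void copy_backwards(char *dst_base, unsigned long pos,
			   const char *str, unsigned long len)
{
	str += len;	/* point just past the string, as the bprm code does */
	while (len > 0) {
		unsigned long offset = pos % PG;
		/* distance back to the previous page boundary */
		unsigned long bytes = offset == 0 ? PG : offset;

		if (bytes > len)
			bytes = len;
		pos -= bytes;
		str -= bytes;
		len -= bytes;
		/* the kernel would kmap() the page at (pos & PAGE_MASK) here */
		memcpy(dst_base + pos, str, bytes);
	}
}

int main(void)
{
	static char stack[3 * PG];
	const char *s = "hello, exec";
	unsigned long end = 2 * PG + 5;	/* arbitrary end position */

	copy_backwards(stack, end, s, strlen(s) + 1);
	printf("%s\n", stack + end - (strlen(s) + 1));
	return 0;
}
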
@@ -298,181 +501,172 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
 	set_fs(oldfs);
 	return r;
 }
-
 EXPORT_SYMBOL(copy_strings_kernel);
 
 #ifdef CONFIG_MMU
+
 /*
- * This routine is used to map in a page into an address space: needed by
- * execve() for the initial stack and environment pages.
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location.  The process proceeds as follows:
  *
- * vma->vm_mm->mmap_sem is held for writing.
+ * 1) Use shift to calculate the new vma endpoints.
+ * 2) Extend vma to cover both the old and new ranges.  This ensures the
+ *    arguments passed to subsequent functions are consistent.
+ * 3) Move vma's page tables to the new range.
+ * 4) Free up any cleared pgd range.
+ * 5) Shrink the vma to cover only the new range.
  */
-void install_arg_page(struct vm_area_struct *vma,
-			struct page *page, unsigned long address)
+static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t * pte;
-	spinlock_t *ptl;
+	unsigned long old_start = vma->vm_start;
+	unsigned long old_end = vma->vm_end;
+	unsigned long length = old_end - old_start;
+	unsigned long new_start = old_start - shift;
+	unsigned long new_end = old_end - shift;
+	struct mmu_gather *tlb;
 
-	if (unlikely(anon_vma_prepare(vma)))
-		goto out;
+	BUG_ON(new_start > new_end);
 
-	flush_dcache_page(page);
-	pte = get_locked_pte(mm, address, &ptl);
-	if (!pte)
-		goto out;
-	if (!pte_none(*pte)) {
-		pte_unmap_unlock(pte, ptl);
-		goto out;
+	/*
+	 * ensure there are no vmas between where we want to go
+	 * and where we are
+	 */
+	if (vma != find_vma(mm, new_start))
+		return -EFAULT;
+
+	/*
+	 * cover the whole range: [new_start, old_end)
+	 */
+	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
+
+	/*
+	 * move the page tables downwards, on failure we rely on
+	 * process cleanup to remove whatever mess we made.
+	 */
+	if (length != move_page_tables(vma, old_start,
+				       vma, new_start, length))
+		return -ENOMEM;
+
+	lru_add_drain();
+	tlb = tlb_gather_mmu(mm, 0);
+	if (new_end > old_start) {
+		/*
+		 * when the old and new regions overlap clear from new_end.
+		 */
+		free_pgd_range(&tlb, new_end, old_end, new_end,
+			vma->vm_next ? vma->vm_next->vm_start : 0);
+	} else {
+		/*
+		 * otherwise, clean from old_start; this is done to not touch
+		 * the address space in [new_end, old_start) some architectures
+		 * have constraints on va-space that make this illegal (IA64) -
+		 * for the others its just a little faster.
+		 */
+		free_pgd_range(&tlb, old_start, old_end, new_end,
+			vma->vm_next ? vma->vm_next->vm_start : 0);
 	}
-	inc_mm_counter(mm, anon_rss);
-	lru_cache_add_active(page);
-	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
-					page, vma->vm_page_prot))));
-	page_add_new_anon_rmap(page, vma, address);
-	pte_unmap_unlock(pte, ptl);
-
-	/* no need for flush_tlb */
-	return;
-out:
-	__free_page(page);
-	force_sig(SIGKILL, current);
+	tlb_finish_mmu(tlb, new_end, old_end);
+
+	/*
+	 * shrink the vma to just the new range.
+	 */
+	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
+
+	return 0;
 }
 
 #define EXTRA_STACK_VM_PAGES	20	/* random */
 
+/*
+ * Finalizes the stack vm_area_struct.  The flags and permissions are updated,
+ * the stack is optionally relocated, and some extra space is added.
+ */
 int setup_arg_pages(struct linux_binprm *bprm,
 		    unsigned long stack_top,
 		    int executable_stack)
 {
-	unsigned long stack_base;
-	struct vm_area_struct *mpnt;
-	struct mm_struct *mm = current->mm;
-	int i, ret;
-	long arg_size;
+	unsigned long ret;
+	unsigned long stack_shift;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = bprm->vma;
+	struct vm_area_struct *prev = NULL;
+	unsigned long vm_flags;
+	unsigned long stack_base;
 
 #ifdef CONFIG_STACK_GROWSUP
-	/* Move the argument and environment strings to the bottom of the
-	 * stack space.
-	 */
-	int offset, j;
-	char *to, *from;
-
-	/* Start by shifting all the pages down */
-	i = 0;
-	for (j = 0; j < MAX_ARG_PAGES; j++) {
-		struct page *page = bprm->page[j];
-		if (!page)
-			continue;
-		bprm->page[i++] = page;
-	}
-
-	/* Now move them within their pages */
-	offset = bprm->p % PAGE_SIZE;
-	to = kmap(bprm->page[0]);
-	for (j = 1; j < i; j++) {
-		memmove(to, to + offset, PAGE_SIZE - offset);
-		from = kmap(bprm->page[j]);
-		memcpy(to + PAGE_SIZE - offset, from, offset);
-		kunmap(bprm->page[j - 1]);
-		to = from;
-	}
-	memmove(to, to + offset, PAGE_SIZE - offset);
-	kunmap(bprm->page[j - 1]);
-
 	/* Limit stack size to 1GB */
 	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
 	if (stack_base > (1 << 30))
 		stack_base = 1 << 30;
-	stack_base = PAGE_ALIGN(stack_top - stack_base);
 
-	/* Adjust bprm->p to point to the end of the strings. */
-	bprm->p = stack_base + PAGE_SIZE * i - offset;
+	/* Make sure we didn't let the argument array grow too large. */
+	if (vma->vm_end - vma->vm_start > stack_base)
+		return -ENOMEM;
 
-	mm->arg_start = stack_base;
-	arg_size = i << PAGE_SHIFT;
+	stack_base = PAGE_ALIGN(stack_top - stack_base);
 
-	/* zero pages that were copied above */
-	while (i < MAX_ARG_PAGES)
-		bprm->page[i++] = NULL;
+	stack_shift = vma->vm_start - stack_base;
+	mm->arg_start = bprm->p - stack_shift;
+	bprm->p = vma->vm_end - stack_shift;
 #else
-	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
-	stack_base = PAGE_ALIGN(stack_base);
-	bprm->p += stack_base;
+	stack_top = arch_align_stack(stack_top);
+	stack_top = PAGE_ALIGN(stack_top);
+	stack_shift = vma->vm_end - stack_top;
+
+	bprm->p -= stack_shift;
 	mm->arg_start = bprm->p;
-	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
 #endif
 
-	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
-
 	if (bprm->loader)
-		bprm->loader += stack_base;
-	bprm->exec += stack_base;
-
-	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!mpnt)
-		return -ENOMEM;
+		bprm->loader -= stack_shift;
+	bprm->exec -= stack_shift;
 
 	down_write(&mm->mmap_sem);
-	{
-		mpnt->vm_mm = mm;
-#ifdef CONFIG_STACK_GROWSUP
-		mpnt->vm_start = stack_base;
-		mpnt->vm_end = stack_base + arg_size;
-#else
-		mpnt->vm_end = stack_top;
-		mpnt->vm_start = mpnt->vm_end - arg_size;
-#endif
-		/* Adjust stack execute permissions; explicitly enable
-		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
-		 * and leave alone (arch default) otherwise. */
-		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
-			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
-		else if (executable_stack == EXSTACK_DISABLE_X)
-			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
-		else
-			mpnt->vm_flags = VM_STACK_FLAGS;
-		mpnt->vm_flags |= mm->def_flags;
-		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
-		if ((ret = insert_vm_struct(mm, mpnt))) {
+	vm_flags = vma->vm_flags;
+
+	/*
+	 * Adjust stack execute permissions; explicitly enable for
+	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+	 * (arch default) otherwise.
+	 */
+	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
+		vm_flags |= VM_EXEC;
+	else if (executable_stack == EXSTACK_DISABLE_X)
+		vm_flags &= ~VM_EXEC;
+	vm_flags |= mm->def_flags;
+
+	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
+			vm_flags);
+	if (ret)
+		goto out_unlock;
+	BUG_ON(prev != vma);
+
+	/* Move stack pages down in memory. */
+	if (stack_shift) {
+		ret = shift_arg_pages(vma, stack_shift);
+		if (ret) {
 			up_write(&mm->mmap_sem);
-			kmem_cache_free(vm_area_cachep, mpnt);
 			return ret;
 		}
-		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
 	}
 
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
-		struct page *page = bprm->page[i];
-		if (page) {
-			bprm->page[i] = NULL;
-			install_arg_page(mpnt, page, stack_base);
-		}
-		stack_base += PAGE_SIZE;
-	}
+#ifdef CONFIG_STACK_GROWSUP
+	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#else
+	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+#endif
+	ret = expand_stack(vma, stack_base);
+	if (ret)
+		ret = -EFAULT;
+
+out_unlock:
 	up_write(&mm->mmap_sem);
-
 	return 0;
 }
-
 EXPORT_SYMBOL(setup_arg_pages);
 
-#define free_arg_pages(bprm) do { } while (0)
-
-#else
-
-static inline void free_arg_pages(struct linux_binprm *bprm)
-{
-	int i;
-
-	for (i = 0; i < MAX_ARG_PAGES; i++) {
-		if (bprm->page[i])
-			__free_page(bprm->page[i]);
-		bprm->page[i] = NULL;
-	}
-}
-
 #endif /* CONFIG_MMU */
 
 struct file *open_exec(const char *name)
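
shift_arg_pages() above relocates the stack vma by growing it to cover [new_start, old_end), moving the page tables, freeing the leftover pgd range, and only then shrinking to the final range. A loose userspace analogy of that grow/move/shrink sequence, with memmove() standing in for move_page_tables() (both tolerate overlapping ranges) and memset() for free_pgd_range(); this is an illustration, not kernel code:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char addr_space[64] = {0};
	unsigned long old_start = 32, old_end = 48, shift = 8;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;

	assert(new_start <= new_end);	/* mirrors the BUG_ON() */
	memcpy(&addr_space[old_start], "argv+env strings", 16);

	/* move contents downwards; ranges may overlap, like the vma case */
	memmove(&addr_space[new_start], &addr_space[old_start],
		old_end - old_start);
	/* "free_pgd_range": clear whatever of the old range is left over */
	memset(&addr_space[new_end], 0, old_end - new_end);

	printf("%.16s\n", &addr_space[new_start]);
	return 0;
}
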
@@ -1000,43 +1194,42 @@ EXPORT_SYMBOL(compute_creds);
  * points to; chop off the first by relocating brpm->p to right after
  * the first '\0' encountered.
  */
-void remove_arg_zero(struct linux_binprm *bprm)
+int remove_arg_zero(struct linux_binprm *bprm)
 {
-	if (bprm->argc) {
-		char ch;
+	int ret = 0;
+	unsigned long offset;
+	char *kaddr;
+	struct page *page;
 
-		do {
-			unsigned long offset;
-			unsigned long index;
-			char *kaddr;
-			struct page *page;
-
-			offset = bprm->p & ~PAGE_MASK;
-			index = bprm->p >> PAGE_SHIFT;
+	if (!bprm->argc)
+		return 0;
 
-			page = bprm->page[index];
-			kaddr = kmap_atomic(page, KM_USER0);
+	do {
+		offset = bprm->p & ~PAGE_MASK;
+		page = get_arg_page(bprm, bprm->p, 0);
+		if (!page) {
+			ret = -EFAULT;
+			goto out;
+		}
+		kaddr = kmap_atomic(page, KM_USER0);
 
-			/* run through page until we reach end or find NUL */
-			do {
-				ch = *(kaddr + offset);
+		for (; offset < PAGE_SIZE && kaddr[offset];
+				offset++, bprm->p++)
+			;
 
-				/* discard that character... */
-				bprm->p++;
-				offset++;
-			} while (offset < PAGE_SIZE && ch != '\0');
+		kunmap_atomic(kaddr, KM_USER0);
+		put_arg_page(page);
 
-			kunmap_atomic(kaddr, KM_USER0);
+		if (offset == PAGE_SIZE)
+			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
+	} while (offset == PAGE_SIZE);
 
-			/* free the old page */
-			if (offset == PAGE_SIZE) {
-				__free_page(page);
-				bprm->page[index] = NULL;
-			}
-		} while (ch != '\0');
+	bprm->p++;
+	bprm->argc--;
+	ret = 0;
 
-		bprm->argc--;
-	}
+out:
+	return ret;
 }
 EXPORT_SYMBOL(remove_arg_zero);
 
@@ -1062,7 +1255,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 		fput(bprm->file);
 		bprm->file = NULL;
 
-		loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+		loader = bprm->vma->vm_end - sizeof(void *);
 
 		file = open_exec("/sbin/loader");
 		retval = PTR_ERR(file);
@@ -1156,7 +1349,6 @@ int do_execve(char * filename,
 	struct file *file;
 	unsigned long env_p;
 	int retval;
-	int i;
 
 	retval = -ENOMEM;
 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
@@ -1170,25 +1362,19 @@ int do_execve(char * filename,
 
 	sched_exec();
 
-	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
-
 	bprm->file = file;
 	bprm->filename = filename;
 	bprm->interp = filename;
-	bprm->mm = mm_alloc();
-	retval = -ENOMEM;
-	if (!bprm->mm)
-		goto out_file;
 
-	retval = init_new_context(current, bprm->mm);
-	if (retval < 0)
-		goto out_mm;
+	retval = bprm_mm_init(bprm);
+	if (retval)
+		goto out_file;
 
-	bprm->argc = count(argv, bprm->p / sizeof(void *));
+	bprm->argc = count(argv, MAX_ARG_STRINGS);
 	if ((retval = bprm->argc) < 0)
 		goto out_mm;
 
-	bprm->envc = count(envp, bprm->p / sizeof(void *));
+	bprm->envc = count(envp, MAX_ARG_STRINGS);
 	if ((retval = bprm->envc) < 0)
 		goto out_mm;
 
@@ -1217,9 +1403,8 @@ int do_execve(char * filename,
 
 	retval = search_binary_handler(bprm,regs);
 	if (retval >= 0) {
-		free_arg_pages(bprm);
-
 		/* execve success */
+		free_arg_pages(bprm);
 		security_bprm_free(bprm);
 		acct_update_integrals(current);
 		kfree(bprm);
1227 } 1412 }
1228 1413
1229out: 1414out:
1230 /* Something went wrong, return the inode and free the argument pages*/ 1415 free_arg_pages(bprm);
1231 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1232 struct page * page = bprm->page[i];
1233 if (page)
1234 __free_page(page);
1235 }
1236
1237 if (bprm->security) 1416 if (bprm->security)
1238 security_bprm_free(bprm); 1417 security_bprm_free(bprm);
1239 1418
1240out_mm: 1419out_mm:
1241 if (bprm->mm) 1420 if (bprm->mm)
1242 mmdrop(bprm->mm); 1421 mmput (bprm->mm);
1243 1422
1244out_file: 1423out_file:
1245 if (bprm->file) { 1424 if (bprm->file) {
1246 allow_write_access(bprm->file); 1425 allow_write_access(bprm->file);
1247 fput(bprm->file); 1426 fput(bprm->file);
1248 } 1427 }
1249
1250out_kfree: 1428out_kfree:
1251 kfree(bprm); 1429 kfree(bprm);
1252 1430