Diffstat (limited to 'fs/exec.c')

 -rw-r--r--  fs/exec.c  212

1 file changed, 105 insertions(+), 107 deletions(-)
@@ -22,7 +22,6 @@
  * formats.
  */
 
-#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/mman.h>
@@ -487,8 +486,6 @@ struct file *open_exec(const char *name)
 		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
 		    S_ISREG(inode->i_mode)) {
 			int err = vfs_permission(&nd, MAY_EXEC);
-			if (!err && !(inode->i_mode & 0111))
-				err = -EACCES;
 			file = ERR_PTR(err);
 			if (!err) {
 				file = nameidata_to_filp(&nd, O_RDONLY);
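The execute-bit check deleted above is not simply dropped: the same series moves the "at least one exec bit" rule into the generic permission code, so vfs_permission(&nd, MAY_EXEC) now rejects a regular file with no execute bits on its own, CAP_DAC_OVERRIDE or not. A hedged paraphrase of the centralized test in generic_permission() (reconstructed from kernels of this era, not quoted verbatim):

	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable if at least one exec bit is set.
	 */
	if (!(mask & MAY_EXEC) ||
	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
		if (capable(CAP_DAC_OVERRIDE))
			return 0;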
@@ -598,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
 	if (!newsighand)
 		return -ENOMEM;
 
-	if (thread_group_empty(current))
+	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
 	/*
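The wholesale substitution of tsk for current throughout de_thread() below is textual rather than functional: the function's only caller passes current, so the two always name the same task. A sketch of the call site in flush_old_exec(), paraphrased from the same file:

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);	/* so tsk == current throughout */
	if (retval)
		goto out;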
@@ -623,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
 	 * Reparenting needs write_lock on tasklist_lock,
 	 * so it is safe to do it under read_lock.
 	 */
-	if (unlikely(current->group_leader == child_reaper))
-		child_reaper = current;
+	if (unlikely(tsk->group_leader == child_reaper))
+		child_reaper = tsk;
 
-	zap_other_threads(current);
+	zap_other_threads(tsk);
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Account for the thread group leader hanging around:
 	 */
 	count = 1;
-	if (!thread_group_leader(current)) {
+	if (!thread_group_leader(tsk)) {
 		count = 2;
 		/*
 		 * The SIGALRM timer survives the exec, but needs to point
@@ -642,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
 		 * synchronize with any firing (by calling del_timer_sync)
 		 * before we can safely let the old group leader die.
 		 */
-		sig->tsk = current;
+		sig->tsk = tsk;
 		spin_unlock_irq(lock);
 		if (hrtimer_cancel(&sig->real_timer))
 			hrtimer_restart(&sig->real_timer);
 		spin_lock_irq(lock);
 	}
 	while (atomic_read(&sig->count) > count) {
-		sig->group_exit_task = current;
+		sig->group_exit_task = tsk;
 		sig->notify_count = count;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
@@ -665,15 +662,13 @@ static int de_thread(struct task_struct *tsk)
 	 * do is to wait for the thread group leader to become inactive,
 	 * and to assume its PID:
 	 */
-	if (!thread_group_leader(current)) {
-		struct dentry *proc_dentry1, *proc_dentry2;
-
+	if (!thread_group_leader(tsk)) {
 		/*
 		 * Wait for the thread group leader to be a zombie.
 		 * It should already be zombie at this point, most
 		 * of the time.
 		 */
-		leader = current->group_leader;
+		leader = tsk->group_leader;
 		while (leader->exit_state != EXIT_ZOMBIE)
 			yield();
 
@@ -687,16 +682,12 @@ static int de_thread(struct task_struct *tsk)
 		 * When we take on its identity by switching to its PID, we
 		 * also take its birthdate (always earlier than our own).
 		 */
-		current->start_time = leader->start_time;
+		tsk->start_time = leader->start_time;
 
-		spin_lock(&leader->proc_lock);
-		spin_lock(&current->proc_lock);
-		proc_dentry1 = proc_pid_unhash(current);
-		proc_dentry2 = proc_pid_unhash(leader);
 		write_lock_irq(&tasklist_lock);
 
-		BUG_ON(leader->tgid != current->tgid);
-		BUG_ON(current->pid == current->tgid);
+		BUG_ON(leader->tgid != tsk->tgid);
+		BUG_ON(tsk->pid == tsk->tgid);
 		/*
 		 * An exec() starts a new thread group with the
 		 * TGID of the previous thread group. Rehash the
@@ -705,34 +696,26 @@ static int de_thread(struct task_struct *tsk)
 		 */
 
 		/* Become a process group leader with the old leader's pid.
-		 * Note: The old leader also uses thispid until release_task
+		 * The old leader becomes a thread of this thread group.
+		 * Note: The old leader also uses this pid until release_task
 		 * is called. Odd but simple and correct.
 		 */
-		detach_pid(current, PIDTYPE_PID);
-		current->pid = leader->pid;
-		attach_pid(current, PIDTYPE_PID,  current->pid);
-		attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
-		attach_pid(current, PIDTYPE_SID,  current->signal->session);
-		list_add_tail_rcu(&current->tasks, &init_task.tasks);
-
-		current->group_leader = current;
-		leader->group_leader = current;
+		detach_pid(tsk, PIDTYPE_PID);
+		tsk->pid = leader->pid;
+		attach_pid(tsk, PIDTYPE_PID,  tsk->pid);
+		transfer_pid(leader, tsk, PIDTYPE_PGID);
+		transfer_pid(leader, tsk, PIDTYPE_SID);
+		list_replace_rcu(&leader->tasks, &tsk->tasks);
 
-		/* Reduce leader to a thread */
-		detach_pid(leader, PIDTYPE_PGID);
-		detach_pid(leader, PIDTYPE_SID);
-		list_del_init(&leader->tasks);
+		tsk->group_leader = tsk;
+		leader->group_leader = tsk;
 
-		current->exit_signal = SIGCHLD;
+		tsk->exit_signal = SIGCHLD;
 
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 		leader->exit_state = EXIT_DEAD;
 
 		write_unlock_irq(&tasklist_lock);
-		spin_unlock(&leader->proc_lock);
-		spin_unlock(&current->proc_lock);
-		proc_pid_flush(proc_dentry1);
-		proc_pid_flush(proc_dentry2);
 	}
 
 	/*
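transfer_pid() and list_replace_rcu() collapse the old detach/attach pairs for the pgrp and session ids, and the del/add of the task-list link, into in-place replacements, so the links are never left unhashed mid-switch; that is also what lets the proc_pid_unhash()/proc_pid_flush() dance above disappear. A hedged sketch of the helper, paraphrased from kernel/pid.c of this era (field names per the 2.6.18 task_struct, not quoted verbatim):

	void transfer_pid(struct task_struct *old, struct task_struct *new,
			  enum pid_type type)
	{
		/* the new task inherits the pid link in place */
		new->pids[type].pid = old->pids[type].pid;
		hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
		old->pids[type].pid = NULL;
	}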
@@ -765,9 +748,9 @@ no_thread_group:
 
 		write_lock_irq(&tasklist_lock);
 		spin_lock(&oldsighand->siglock);
-		spin_lock(&newsighand->siglock);
+		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
 
-		rcu_assign_pointer(current->sighand, newsighand);
+		rcu_assign_pointer(tsk->sighand, newsighand);
 		recalc_sigpending();
 
 		spin_unlock(&newsighand->siglock);
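The switch to spin_lock_nested() is a lockdep annotation, not a locking change: oldsighand->siglock and newsighand->siglock are distinct objects in the same lock class, and taking them together would otherwise be flagged as recursive locking. A minimal sketch of the idiom (hypothetical a/b objects, for illustration only):

	spin_lock(&a->siglock);
	/* tell lockdep the second same-class lock is a distinct object */
	spin_lock_nested(&b->siglock, SINGLE_DEPTH_NESTING);
	/* ... migrate signal state from a to b ... */
	spin_unlock(&b->siglock);
	spin_unlock(&a->siglock);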
@@ -778,7 +761,7 @@ no_thread_group:
 		kmem_cache_free(sighand_cachep, oldsighand);
 	}
 
-	BUG_ON(!thread_group_leader(current));
+	BUG_ON(!thread_group_leader(tsk));
 	return 0;
 }
 
@@ -915,8 +898,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 	return 0;
 
 mmap_failed:
-	put_files_struct(current->files);
-	current->files = files;
+	reset_files_struct(current, files);
 out:
 	return retval;
 }
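reset_files_struct() folds the two-line restore into a helper that also takes task_lock() around the pointer swap, which the open-coded version skipped. A hedged sketch of the helper as added in kernel/fork.c around this time (paraphrased, not quoted verbatim):

	void reset_files_struct(struct task_struct *tsk,
				struct files_struct *files)
	{
		struct files_struct *old;

		old = tsk->files;
		task_lock(tsk);
		tsk->files = files;	/* swap under the task lock */
		task_unlock(tsk);
		put_files_struct(old);
	}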
@@ -934,12 +916,6 @@ int prepare_binprm(struct linux_binprm *bprm)
 	int retval;
 
 	mode = inode->i_mode;
-	/*
-	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
-	 * generic_permission lets a non-executable through
-	 */
-	if (!(mode & 0111))	/* with at least _one_ execute bit set */
-		return -EACCES;
 	if (bprm->file->f_op == NULL)
 		return -EACCES;
 
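Same story as in open_exec() above: with generic_permission() now enforcing the execute bits for MAY_EXEC, this duplicate check (and its stale CAP_DAC_OVERRIDE comment) is dead weight. The user-visible invariant both deletions preserve can be demonstrated from userspace; a small self-contained test (hypothetical path, file created beforehand with mode 0644):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char *argv[] = { "/tmp/no-x-bits", NULL };

		execv(argv[0], argv);	/* mode 0644: must not execute */
		if (errno == EACCES)
			printf("EACCES as expected, even for root\n");
		return 0;
	}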
@@ -1379,67 +1355,102 @@ static void format_corename(char *corename, const char *pattern, long signr)
 	*out_ptr = 0;
 }
 
-static void zap_threads (struct mm_struct *mm)
+static void zap_process(struct task_struct *start)
 {
-	struct task_struct *g, *p;
-	struct task_struct *tsk = current;
-	struct completion *vfork_done = tsk->vfork_done;
-	int traced = 0;
+	struct task_struct *t;
 
-	/*
-	 * Make sure nobody is waiting for us to release the VM,
-	 * otherwise we can deadlock when we wait on each other
-	 */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
+	start->signal->flags = SIGNAL_GROUP_EXIT;
+	start->signal->group_stop_count = 0;
 
-	read_lock(&tasklist_lock);
-	do_each_thread(g,p)
-		if (mm == p->mm && p != tsk) {
-			force_sig_specific(SIGKILL, p);
-			mm->core_waiters++;
-			if (unlikely(p->ptrace) &&
-			    unlikely(p->parent->mm == mm))
-				traced = 1;
+	t = start;
+	do {
+		if (t != current && t->mm) {
+			t->mm->core_waiters++;
+			sigaddset(&t->pending.signal, SIGKILL);
+			signal_wake_up(t, 1);
 		}
-	while_each_thread(g,p);
+	} while ((t = next_thread(t)) != start);
+}
 
-	read_unlock(&tasklist_lock);
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+				int exit_code)
+{
+	struct task_struct *g, *p;
+	unsigned long flags;
+	int err = -EAGAIN;
+
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+		tsk->signal->group_exit_code = exit_code;
+		zap_process(tsk);
+		err = 0;
+	}
+	spin_unlock_irq(&tsk->sighand->siglock);
+	if (err)
+		return err;
 
-	if (unlikely(traced)) {
-		/*
-		 * We are zapping a thread and the thread it ptraces.
-		 * If the tracee went into a ptrace stop for exit tracing,
-		 * we could deadlock since the tracer is waiting for this
-		 * coredump to finish.  Detach them so they can both die.
-		 */
-		write_lock_irq(&tasklist_lock);
-		do_each_thread(g,p) {
-			if (mm == p->mm && p != tsk &&
-			    p->ptrace && p->parent->mm == mm) {
-				__ptrace_detach(p, 0);
+	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+		goto done;
+
+	rcu_read_lock();
+	for_each_process(g) {
+		if (g == tsk->group_leader)
+			continue;
+
+		p = g;
+		do {
+			if (p->mm) {
+				if (p->mm == mm) {
+					/*
+					 * p->sighand can't disappear, but
+					 * may be changed by de_thread()
+					 */
+					lock_task_sighand(p, &flags);
+					zap_process(p);
+					unlock_task_sighand(p, &flags);
+				}
+				break;
 			}
-		} while_each_thread(g,p);
-		write_unlock_irq(&tasklist_lock);
+		} while ((p = next_thread(p)) != g);
 	}
+	rcu_read_unlock();
+done:
+	return mm->core_waiters;
 }
 
-static void coredump_wait(struct mm_struct *mm)
+static int coredump_wait(int exit_code)
 {
-	DECLARE_COMPLETION(startup_done);
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	struct completion startup_done;
+	struct completion *vfork_done;
 	int core_waiters;
 
+	init_completion(&mm->core_done);
+	init_completion(&startup_done);
 	mm->core_startup_done = &startup_done;
 
-	zap_threads(mm);
-	core_waiters = mm->core_waiters;
+	core_waiters = zap_threads(tsk, mm, exit_code);
 	up_write(&mm->mmap_sem);
 
+	if (unlikely(core_waiters < 0))
+		goto fail;
+
+	/*
+	 * Make sure nobody is waiting for us to release the VM,
+	 * otherwise we can deadlock when we wait on each other
+	 */
+	vfork_done = tsk->vfork_done;
+	if (vfork_done) {
+		tsk->vfork_done = NULL;
+		complete(vfork_done);
+	}
+
 	if (core_waiters)
 		wait_for_completion(&startup_done);
+fail:
 	BUG_ON(mm->core_waiters);
+	return core_waiters;
 }
 
 int do_coredump(long signr, int exit_code, struct pt_regs * regs)
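The rewritten zap_threads() no longer relies on signal delivery alone: zap_process() queues SIGKILL directly and bumps mm->core_waiters for every thread sharing the dumping mm, across all processes, under RCU, and coredump_wait() then sleeps on core_startup_done until the count drains. The consumer side of that handshake lives in exit_mm(); a hedged paraphrase from kernel/exit.c of the same era (not quoted verbatim):

	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)	/* last thread checking in? */
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);	/* dump finished */
		down_read(&mm->mmap_sem);
	}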
@@ -1473,22 +1484,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	}
 	mm->dumpable = 0;
 
-	retval = -EAGAIN;
-	spin_lock_irq(&current->sighand->siglock);
-	if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
-		current->signal->flags = SIGNAL_GROUP_EXIT;
-		current->signal->group_exit_code = exit_code;
-		current->signal->group_stop_count = 0;
-		retval = 0;
-	}
-	spin_unlock_irq(&current->sighand->siglock);
-	if (retval) {
-		up_write(&mm->mmap_sem);
+	retval = coredump_wait(exit_code);
+	if (retval < 0)
 		goto fail;
-	}
-
-	init_completion(&mm->core_done);
-	coredump_wait(mm);
 
 	/*
 	 * Clear any false indication of pending signals that might
