author		Eric W. Biederman <ebiederm@xmission.com>	2006-09-27 04:51:13 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 11:26:20 -0400
commit		aafe6c2a2b6bce5a3a4913ce5c07e85ea143144d
tree		602fefd6b12dad6351bb91661defce0abaf9b24d
parent		66f37509fc7191df468a8d183374f48b13bacb73
[PATCH] de_thread: Use tsk not current
Ingo Oeser pointed out that because current expands to an inline function
it is more space efficient and somewhat faster to simply keep a cached
copy of current in another variable.  This patch implements that for the
de_thread function.

(akpm: saves nearly 100 bytes of text on x86)

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	fs/exec.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
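For context, the saving comes from hoisting the per-use lookup that the current macro performs into a single cached pointer. The sketch below is illustration only, not kernel code: task_struct is reduced to two fields and get_current_stub() stands in for the real accessor, just to show the before/after shape of the change.

/* Illustration only: stand-ins for the kernel's task_struct and 'current'. */
#include <stdio.h>

struct task_struct {
	int pid;
	int tgid;
};

static struct task_struct demo_task = { .pid = 42, .tgid = 42 };

/* Stub for the lookup the 'current' macro performs on every use. */
static struct task_struct *get_current_stub(void)
{
	return &demo_task;
}

/* Before: each use re-runs the lookup, so the compiler emits it repeatedly. */
static int is_leader_uncached(void)
{
	return get_current_stub()->pid == get_current_stub()->tgid;
}

/* After: the pointer is fetched once and reused, as de_thread() now does
 * with its 'tsk' argument. */
static int is_leader_cached(struct task_struct *tsk)
{
	return tsk->pid == tsk->tgid;
}

int main(void)
{
	printf("%d %d\n", is_leader_uncached(),
	       is_leader_cached(get_current_stub()));
	return 0;
}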
diff --git a/fs/exec.c b/fs/exec.c
index b7aa3d6422d6..97df6e0aeaee 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -595,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
 	if (!newsighand)
 		return -ENOMEM;
 
-	if (thread_group_empty(current))
+	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
 	/*
@@ -620,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
 	 * Reparenting needs write_lock on tasklist_lock,
 	 * so it is safe to do it under read_lock.
 	 */
-	if (unlikely(current->group_leader == child_reaper))
-		child_reaper = current;
+	if (unlikely(tsk->group_leader == child_reaper))
+		child_reaper = tsk;
 
-	zap_other_threads(current);
+	zap_other_threads(tsk);
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Account for the thread group leader hanging around:
 	 */
 	count = 1;
-	if (!thread_group_leader(current)) {
+	if (!thread_group_leader(tsk)) {
 		count = 2;
 		/*
 		 * The SIGALRM timer survives the exec, but needs to point
@@ -639,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
 		 * synchronize with any firing (by calling del_timer_sync)
 		 * before we can safely let the old group leader die.
 		 */
-		sig->tsk = current;
+		sig->tsk = tsk;
 		spin_unlock_irq(lock);
 		if (hrtimer_cancel(&sig->real_timer))
 			hrtimer_restart(&sig->real_timer);
 		spin_lock_irq(lock);
 	}
 	while (atomic_read(&sig->count) > count) {
-		sig->group_exit_task = current;
+		sig->group_exit_task = tsk;
 		sig->notify_count = count;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
@@ -662,13 +662,13 @@ static int de_thread(struct task_struct *tsk)
 	 * do is to wait for the thread group leader to become inactive,
 	 * and to assume its PID:
 	 */
-	if (!thread_group_leader(current)) {
+	if (!thread_group_leader(tsk)) {
 		/*
 		 * Wait for the thread group leader to be a zombie.
 		 * It should already be zombie at this point, most
 		 * of the time.
 		 */
-		leader = current->group_leader;
+		leader = tsk->group_leader;
 		while (leader->exit_state != EXIT_ZOMBIE)
 			yield();
 
@@ -682,12 +682,12 @@ static int de_thread(struct task_struct *tsk)
 		 * When we take on its identity by switching to its PID, we
 		 * also take its birthdate (always earlier than our own).
 		 */
-		current->start_time = leader->start_time;
+		tsk->start_time = leader->start_time;
 
 		write_lock_irq(&tasklist_lock);
 
-		BUG_ON(leader->tgid != current->tgid);
-		BUG_ON(current->pid == current->tgid);
+		BUG_ON(leader->tgid != tsk->tgid);
+		BUG_ON(tsk->pid == tsk->tgid);
 		/*
 		 * An exec() starts a new thread group with the
 		 * TGID of the previous thread group. Rehash the
@@ -700,17 +700,17 @@ static int de_thread(struct task_struct *tsk)
 		 * Note: The old leader also uses this pid until release_task
 		 * is called.  Odd but simple and correct.
 		 */
-		detach_pid(current, PIDTYPE_PID);
-		current->pid = leader->pid;
-		attach_pid(current, PIDTYPE_PID, current->pid);
-		transfer_pid(leader, current, PIDTYPE_PGID);
-		transfer_pid(leader, current, PIDTYPE_SID);
-		list_replace_rcu(&leader->tasks, &current->tasks);
+		detach_pid(tsk, PIDTYPE_PID);
+		tsk->pid = leader->pid;
+		attach_pid(tsk, PIDTYPE_PID, tsk->pid);
+		transfer_pid(leader, tsk, PIDTYPE_PGID);
+		transfer_pid(leader, tsk, PIDTYPE_SID);
+		list_replace_rcu(&leader->tasks, &tsk->tasks);
 
-		current->group_leader = current;
-		leader->group_leader = current;
+		tsk->group_leader = tsk;
+		leader->group_leader = tsk;
 
-		current->exit_signal = SIGCHLD;
+		tsk->exit_signal = SIGCHLD;
 
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 		leader->exit_state = EXIT_DEAD;
@@ -750,7 +750,7 @@ no_thread_group:
 		spin_lock(&oldsighand->siglock);
 		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
 
-		rcu_assign_pointer(current->sighand, newsighand);
+		rcu_assign_pointer(tsk->sighand, newsighand);
 		recalc_sigpending();
 
 		spin_unlock(&newsighand->siglock);
@@ -761,7 +761,7 @@ no_thread_group:
 			kmem_cache_free(sighand_cachep, oldsighand);
 	}
 
-	BUG_ON(!thread_group_leader(current));
+	BUG_ON(!thread_group_leader(tsk));
 	return 0;
 }
 