diff options
author:    Ionut Alexa <ionut.m.alexa@gmail.com>      2014-08-08 17:21:18 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-08-08 18:57:22 -0400
commit:    a0be55dee71d437f7593c8c3673edd92962bafaf (patch)
tree:      a6304617295281f4f8bf30c3e4a0158e486d73cf /kernel/exit.c
parent:    f58d6c75471c1b034b5bb0b38a7d6a9671f96299 (diff)
kernel/exit.c: fix coding style warnings and errors
Fixed coding style warnings and errors.
Signed-off-by: Ionut Alexa <ionut.m.alexa@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--   kernel/exit.c   49
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 88c6b3e42583..32c58f7433a3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -59,7 +59,7 @@ | |||
59 | #include <asm/pgtable.h> | 59 | #include <asm/pgtable.h> |
60 | #include <asm/mmu_context.h> | 60 | #include <asm/mmu_context.h> |
61 | 61 | ||
62 | static void exit_mm(struct task_struct * tsk); | 62 | static void exit_mm(struct task_struct *tsk); |
63 | 63 | ||
64 | static void __unhash_process(struct task_struct *p, bool group_dead) | 64 | static void __unhash_process(struct task_struct *p, bool group_dead) |
65 | { | 65 | { |
@@ -151,7 +151,7 @@ static void __exit_signal(struct task_struct *tsk) | |||
151 | spin_unlock(&sighand->siglock); | 151 | spin_unlock(&sighand->siglock); |
152 | 152 | ||
153 | __cleanup_sighand(sighand); | 153 | __cleanup_sighand(sighand); |
154 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | 154 | clear_tsk_thread_flag(tsk, TIF_SIGPENDING); |
155 | if (group_dead) { | 155 | if (group_dead) { |
156 | flush_sigqueue(&sig->shared_pending); | 156 | flush_sigqueue(&sig->shared_pending); |
157 | tty_kref_put(tty); | 157 | tty_kref_put(tty); |
@@ -168,7 +168,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | 170 | ||
171 | void release_task(struct task_struct * p) | 171 | void release_task(struct task_struct *p) |
172 | { | 172 | { |
173 | struct task_struct *leader; | 173 | struct task_struct *leader; |
174 | int zap_leader; | 174 | int zap_leader; |
@@ -192,7 +192,8 @@ repeat: | |||
192 | */ | 192 | */ |
193 | zap_leader = 0; | 193 | zap_leader = 0; |
194 | leader = p->group_leader; | 194 | leader = p->group_leader; |
195 | if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { | 195 | if (leader != p && thread_group_empty(leader) |
196 | && leader->exit_state == EXIT_ZOMBIE) { | ||
196 | /* | 197 | /* |
197 | * If we were the last child thread and the leader has | 198 | * If we were the last child thread and the leader has |
198 | * exited already, and the leader's parent ignores SIGCHLD, | 199 | * exited already, and the leader's parent ignores SIGCHLD, |
@@ -241,7 +242,8 @@ struct pid *session_of_pgrp(struct pid *pgrp) | |||
241 | * | 242 | * |
242 | * "I ask you, have you ever known what it is to be an orphan?" | 243 | * "I ask you, have you ever known what it is to be an orphan?" |
243 | */ | 244 | */ |
244 | static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) | 245 | static int will_become_orphaned_pgrp(struct pid *pgrp, |
246 | struct task_struct *ignored_task) | ||
245 | { | 247 | { |
246 | struct task_struct *p; | 248 | struct task_struct *p; |
247 | 249 | ||
@@ -294,9 +296,9 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) | |||
294 | struct task_struct *ignored_task = tsk; | 296 | struct task_struct *ignored_task = tsk; |
295 | 297 | ||
296 | if (!parent) | 298 | if (!parent) |
297 | /* exit: our father is in a different pgrp than | 299 | /* exit: our father is in a different pgrp than |
298 | * we are and we were the only connection outside. | 300 | * we are and we were the only connection outside. |
299 | */ | 301 | */ |
300 | parent = tsk->real_parent; | 302 | parent = tsk->real_parent; |
301 | else | 303 | else |
302 | /* reparent: our child is in a different pgrp than | 304 | /* reparent: our child is in a different pgrp than |
@@ -405,7 +407,7 @@ assign_new_owner: | |||
405 | * Turn us into a lazy TLB process if we | 407 | * Turn us into a lazy TLB process if we |
406 | * aren't already.. | 408 | * aren't already.. |
407 | */ | 409 | */ |
408 | static void exit_mm(struct task_struct * tsk) | 410 | static void exit_mm(struct task_struct *tsk) |
409 | { | 411 | { |
410 | struct mm_struct *mm = tsk->mm; | 412 | struct mm_struct *mm = tsk->mm; |
411 | struct core_state *core_state; | 413 | struct core_state *core_state; |
@@ -425,6 +427,7 @@ static void exit_mm(struct task_struct * tsk) | |||
425 | core_state = mm->core_state; | 427 | core_state = mm->core_state; |
426 | if (core_state) { | 428 | if (core_state) { |
427 | struct core_thread self; | 429 | struct core_thread self; |
430 | |||
428 | up_read(&mm->mmap_sem); | 431 | up_read(&mm->mmap_sem); |
429 | 432 | ||
430 | self.task = tsk; | 433 | self.task = tsk; |
@@ -566,6 +569,7 @@ static void forget_original_parent(struct task_struct *father) | |||
566 | 569 | ||
567 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 570 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
568 | struct task_struct *t = p; | 571 | struct task_struct *t = p; |
572 | |||
569 | do { | 573 | do { |
570 | t->real_parent = reaper; | 574 | t->real_parent = reaper; |
571 | if (t->parent == father) { | 575 | if (t->parent == father) { |
@@ -599,7 +603,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) | |||
599 | /* | 603 | /* |
600 | * This does two things: | 604 | * This does two things: |
601 | * | 605 | * |
602 | * A. Make init inherit all the child processes | 606 | * A. Make init inherit all the child processes |
603 | * B. Check to see if any process groups have become orphaned | 607 | * B. Check to see if any process groups have become orphaned |
604 | * as a result of our exiting, and if they have any stopped | 608 | * as a result of our exiting, and if they have any stopped |
605 | * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) | 609 | * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) |
@@ -649,9 +653,8 @@ static void check_stack_usage(void) | |||
649 | 653 | ||
650 | spin_lock(&low_water_lock); | 654 | spin_lock(&low_water_lock); |
651 | if (free < lowest_to_date) { | 655 | if (free < lowest_to_date) { |
652 | printk(KERN_WARNING "%s (%d) used greatest stack depth: " | 656 | pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n", |
653 | "%lu bytes left\n", | 657 | current->comm, task_pid_nr(current), free); |
654 | current->comm, task_pid_nr(current), free); | ||
655 | lowest_to_date = free; | 658 | lowest_to_date = free; |
656 | } | 659 | } |
657 | spin_unlock(&low_water_lock); | 660 | spin_unlock(&low_water_lock); |
@@ -692,8 +695,7 @@ void do_exit(long code) | |||
692 | * leave this task alone and wait for reboot. | 695 | * leave this task alone and wait for reboot. |
693 | */ | 696 | */ |
694 | if (unlikely(tsk->flags & PF_EXITING)) { | 697 | if (unlikely(tsk->flags & PF_EXITING)) { |
695 | printk(KERN_ALERT | 698 | pr_alert("Fixing recursive fault but reboot is needed!\n"); |
696 | "Fixing recursive fault but reboot is needed!\n"); | ||
697 | /* | 699 | /* |
698 | * We can do this unlocked here. The futex code uses | 700 | * We can do this unlocked here. The futex code uses |
699 | * this flag just to verify whether the pi state | 701 | * this flag just to verify whether the pi state |
@@ -717,9 +719,9 @@ void do_exit(long code) | |||
717 | raw_spin_unlock_wait(&tsk->pi_lock); | 719 | raw_spin_unlock_wait(&tsk->pi_lock); |
718 | 720 | ||
719 | if (unlikely(in_atomic())) | 721 | if (unlikely(in_atomic())) |
720 | printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", | 722 | pr_info("note: %s[%d] exited with preempt_count %d\n", |
721 | current->comm, task_pid_nr(current), | 723 | current->comm, task_pid_nr(current), |
722 | preempt_count()); | 724 | preempt_count()); |
723 | 725 | ||
724 | acct_update_integrals(tsk); | 726 | acct_update_integrals(tsk); |
725 | /* sync mm's RSS info before statistics gathering */ | 727 | /* sync mm's RSS info before statistics gathering */ |
@@ -837,7 +839,6 @@ void do_exit(long code) | |||
837 | for (;;) | 839 | for (;;) |
838 | cpu_relax(); /* For when BUG is null */ | 840 | cpu_relax(); /* For when BUG is null */ |
839 | } | 841 | } |
840 | |||
841 | EXPORT_SYMBOL_GPL(do_exit); | 842 | EXPORT_SYMBOL_GPL(do_exit); |
842 | 843 | ||
843 | void complete_and_exit(struct completion *comp, long code) | 844 | void complete_and_exit(struct completion *comp, long code) |
@@ -847,7 +848,6 @@ void complete_and_exit(struct completion *comp, long code) | |||
847 | 848 | ||
848 | do_exit(code); | 849 | do_exit(code); |
849 | } | 850 | } |
850 | |||
851 | EXPORT_SYMBOL(complete_and_exit); | 851 | EXPORT_SYMBOL(complete_and_exit); |
852 | 852 | ||
853 | SYSCALL_DEFINE1(exit, int, error_code) | 853 | SYSCALL_DEFINE1(exit, int, error_code) |
@@ -870,6 +870,7 @@ do_group_exit(int exit_code) | |||
870 | exit_code = sig->group_exit_code; | 870 | exit_code = sig->group_exit_code; |
871 | else if (!thread_group_empty(current)) { | 871 | else if (!thread_group_empty(current)) { |
872 | struct sighand_struct *const sighand = current->sighand; | 872 | struct sighand_struct *const sighand = current->sighand; |
873 | |||
873 | spin_lock_irq(&sighand->siglock); | 874 | spin_lock_irq(&sighand->siglock); |
874 | if (signal_group_exit(sig)) | 875 | if (signal_group_exit(sig)) |
875 | /* Another thread got here before we took the lock. */ | 876 | /* Another thread got here before we took the lock. */ |
@@ -1034,9 +1035,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1034 | * as other threads in the parent group can be right | 1035 | * as other threads in the parent group can be right |
1035 | * here reaping other children at the same time. | 1036 | * here reaping other children at the same time. |
1036 | * | 1037 | * |
1037 | * We use thread_group_cputime_adjusted() to get times for the thread | 1038 | * We use thread_group_cputime_adjusted() to get times for |
1038 | * group, which consolidates times for all threads in the | 1039 | * the thread group, which consolidates times for all threads |
1039 | * group including the group leader. | 1040 | * in the group including the group leader. |
1040 | */ | 1041 | */ |
1041 | thread_group_cputime_adjusted(p, &tgutime, &tgstime); | 1042 | thread_group_cputime_adjusted(p, &tgutime, &tgstime); |
1042 | spin_lock_irq(&p->real_parent->sighand->siglock); | 1043 | spin_lock_irq(&p->real_parent->sighand->siglock); |
@@ -1418,6 +1419,7 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) | |||
1418 | 1419 | ||
1419 | list_for_each_entry(p, &tsk->children, sibling) { | 1420 | list_for_each_entry(p, &tsk->children, sibling) { |
1420 | int ret = wait_consider_task(wo, 0, p); | 1421 | int ret = wait_consider_task(wo, 0, p); |
1422 | |||
1421 | if (ret) | 1423 | if (ret) |
1422 | return ret; | 1424 | return ret; |
1423 | } | 1425 | } |
@@ -1431,6 +1433,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) | |||
1431 | 1433 | ||
1432 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { | 1434 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { |
1433 | int ret = wait_consider_task(wo, 1, p); | 1435 | int ret = wait_consider_task(wo, 1, p); |
1436 | |||
1434 | if (ret) | 1437 | if (ret) |
1435 | return ret; | 1438 | return ret; |
1436 | } | 1439 | } |