diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
commit | faafcba3b5e15999cf75d5c5a513ac8e47e2545f (patch) | |
tree | 47d58d1c00e650e820506c91eb9a41268756bdda /kernel/exit.c | |
parent | 13ead805c5a14b0e7ecd34f61404a5bfba655895 (diff) | |
parent | f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this cycle were:
- Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave
Hansen)
- Various sched/idle refinements for better idle handling (Nicolas
Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)
- sched/numa updates and optimizations (Rik van Riel)
- sysbench speedup (Vincent Guittot)
- capacity calculation cleanups/refactoring (Vincent Guittot)
- Various cleanups to thread group iteration (Oleg Nesterov)
- Double-rq-lock removal optimization and various refactorings
(Kirill Tkhai)
- various sched/deadline fixes
... and lots of other changes"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
sched/fair: Delete resched_cpu() from idle_balance()
sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
sched: Improve sysbench performance by fixing spurious active migration
sched/x86: Fix up typo in topology detection
x86, sched: Add new topology for multi-NUMA-node CPUs
sched/rt: Use resched_curr() in task_tick_rt()
sched: Use rq->rd in sched_setaffinity() under RCU read lock
sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
sched: Use dl_bw_of() under RCU read lock
sched/fair: Remove duplicate code from can_migrate_task()
sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
sched: print_rq(): Don't use tasklist_lock
sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
sched: Fix the task-group check in tg_has_rt_tasks()
sched/fair: Leverage the idle state info when choosing the "idlest" cpu
sched: Let the scheduler see CPU idle states
sched/deadline: Fix inter- exclusive cpusets migrations
sched/deadline: Clear dl_entity params when setscheduling to different class
sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
...
Diffstat (limited to 'kernel/exit.c')
-rw-r--r-- | kernel/exit.c | 47 |
1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c index d13f2eec4bb8..5d30019ff953 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -115,32 +115,33 @@ static void __exit_signal(struct task_struct *tsk) | |||
115 | 115 | ||
116 | if (tsk == sig->curr_target) | 116 | if (tsk == sig->curr_target) |
117 | sig->curr_target = next_thread(tsk); | 117 | sig->curr_target = next_thread(tsk); |
118 | /* | ||
119 | * Accumulate here the counters for all threads but the | ||
120 | * group leader as they die, so they can be added into | ||
121 | * the process-wide totals when those are taken. | ||
122 | * The group leader stays around as a zombie as long | ||
123 | * as there are other threads. When it gets reaped, | ||
124 | * the exit.c code will add its counts into these totals. | ||
125 | * We won't ever get here for the group leader, since it | ||
126 | * will have been the last reference on the signal_struct. | ||
127 | */ | ||
128 | task_cputime(tsk, &utime, &stime); | ||
129 | sig->utime += utime; | ||
130 | sig->stime += stime; | ||
131 | sig->gtime += task_gtime(tsk); | ||
132 | sig->min_flt += tsk->min_flt; | ||
133 | sig->maj_flt += tsk->maj_flt; | ||
134 | sig->nvcsw += tsk->nvcsw; | ||
135 | sig->nivcsw += tsk->nivcsw; | ||
136 | sig->inblock += task_io_get_inblock(tsk); | ||
137 | sig->oublock += task_io_get_oublock(tsk); | ||
138 | task_io_accounting_add(&sig->ioac, &tsk->ioac); | ||
139 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; | ||
140 | } | 118 | } |
141 | 119 | ||
120 | /* | ||
121 | * Accumulate here the counters for all threads but the group leader | ||
122 | * as they die, so they can be added into the process-wide totals | ||
123 | * when those are taken. The group leader stays around as a zombie as | ||
124 | * long as there are other threads. When it gets reaped, the exit.c | ||
125 | * code will add its counts into these totals. We won't ever get here | ||
126 | * for the group leader, since it will have been the last reference on | ||
127 | * the signal_struct. | ||
128 | */ | ||
129 | task_cputime(tsk, &utime, &stime); | ||
130 | write_seqlock(&sig->stats_lock); | ||
131 | sig->utime += utime; | ||
132 | sig->stime += stime; | ||
133 | sig->gtime += task_gtime(tsk); | ||
134 | sig->min_flt += tsk->min_flt; | ||
135 | sig->maj_flt += tsk->maj_flt; | ||
136 | sig->nvcsw += tsk->nvcsw; | ||
137 | sig->nivcsw += tsk->nivcsw; | ||
138 | sig->inblock += task_io_get_inblock(tsk); | ||
139 | sig->oublock += task_io_get_oublock(tsk); | ||
140 | task_io_accounting_add(&sig->ioac, &tsk->ioac); | ||
141 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; | ||
142 | sig->nr_threads--; | 142 | sig->nr_threads--; |
143 | __unhash_process(tsk, group_dead); | 143 | __unhash_process(tsk, group_dead); |
144 | write_sequnlock(&sig->stats_lock); | ||
144 | 145 | ||
145 | /* | 146 | /* |
146 | * Do this under ->siglock, we can race with another thread | 147 | * Do this under ->siglock, we can race with another thread |
@@ -1046,6 +1047,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1046 | spin_lock_irq(&p->real_parent->sighand->siglock); | 1047 | spin_lock_irq(&p->real_parent->sighand->siglock); |
1047 | psig = p->real_parent->signal; | 1048 | psig = p->real_parent->signal; |
1048 | sig = p->signal; | 1049 | sig = p->signal; |
1050 | write_seqlock(&psig->stats_lock); | ||
1049 | psig->cutime += tgutime + sig->cutime; | 1051 | psig->cutime += tgutime + sig->cutime; |
1050 | psig->cstime += tgstime + sig->cstime; | 1052 | psig->cstime += tgstime + sig->cstime; |
1051 | psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; | 1053 | psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; |
@@ -1068,6 +1070,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1068 | psig->cmaxrss = maxrss; | 1070 | psig->cmaxrss = maxrss; |
1069 | task_io_accounting_add(&psig->ioac, &p->ioac); | 1071 | task_io_accounting_add(&psig->ioac, &p->ioac); |
1070 | task_io_accounting_add(&psig->ioac, &sig->ioac); | 1072 | task_io_accounting_add(&psig->ioac, &sig->ioac); |
1073 | write_sequnlock(&psig->stats_lock); | ||
1071 | spin_unlock_irq(&p->real_parent->sighand->siglock); | 1074 | spin_unlock_irq(&p->real_parent->sighand->siglock); |
1072 | } | 1075 | } |
1073 | 1076 | ||