path: root/kernel/posix-cpu-timers.c
author		Thomas Gleixner <tglx@linutronix.de>	2006-10-17 03:09:39 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-17 11:18:43 -0400
commit		ac08c26492a0ad4d94a25bd47d5630cd38337069 (patch)
tree		a6115a173a221ef80a9ffe9b4f110bd4d92443c5 /kernel/posix-cpu-timers.c
parent		e24650c2e744f99541125a5b023f0d02cad19d14 (diff)
[PATCH] posix-cpu-timers: prevent signal delivery starvation
The integer divisions in the timer accounting code can round the result down to 0. Adding 0 is without effect and the signal delivery stops. Clamp the division result to a minimum of 1 to avoid this.

The problem was reported by Seongbae Park <spark@google.com>, who also provided an initial patch.

Roland sayeth:

  I have had some more time to think about the problem, and to reproduce it using Toyo's test case. For the record, if my understanding of the problem is correct, this happens only in one very particular case. First, the expiry time has to be so soon that in cputime_t units (usually 1s/HZ ticks) it's < nthreads, so the division yields zero. Second, it only affects each thread that is so new that its CPU time accumulation is zero, so that now + 0 is still zero and ->it_*_expires winds up staying zero.

  For the VIRT and PROF clocks when cputime_t is tick granularity (or the SCHED clock on configurations where sched_clock's value only advances on clock ticks), this is not hard to arrange with new threads starting up and blocking before they accumulate a whole tick of CPU time. That's what happens in Toyo's test case.

  Note that in general it is fine for that division to round down to zero and set each thread's expiry time to its "now" time. The problem only arises with threads whose "now" value is still zero, so that now + 0 winds up 0 and is interpreted as "not set" instead of ">= now". So it would be a sufficient and more precise fix to just use max(ticks, 1) inside the loop when setting each it_*_expires value.

  But it does no harm to round the division up to one and always advance every thread's expiry time. If the thread didn't already fire timers for the expiry time of "now", there is no expectation that it will do so before the next tick anyway. So I followed Thomas's patch in lifting the max out of the loops.

  This patch also covers the reload cases, which are harder to write a test for (and I didn't try). I've tested it with Toyo's case and it fixes that.

[toyoa@mvista.com: fix: min_t -> max_t]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Daniel Walker <dwalker@mvista.com>
Cc: Toyo Abe <toyoa@mvista.com>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: Seongbae Park <spark@google.com>
Cc: Peter Mattis <pmattis@google.com>
Cc: Rohit Seth <rohitseth@google.com>
Cc: Martin Bligh <mbligh@google.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
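A minimal userspace sketch of the failure mode described above (illustration only, not kernel code: the cputime_t typedef, the div_non_zero() helper, and the numbers are stand-ins chosen for the example):

	#include <stdio.h>

	typedef unsigned long cputime_t;	/* stand-in: CPU time in ticks */

	/* Mirrors the idea of the patch's cputime_div_non_zero(): divide,
	 * but never let the per-thread share round down to 0. */
	static cputime_t div_non_zero(cputime_t time, unsigned long div)
	{
		cputime_t res = time / div;

		return res > 0 ? res : 1;
	}

	int main(void)
	{
		cputime_t left = 3;		/* ticks until the group-wide expiry */
		unsigned long nthreads = 10;	/* more threads than remaining ticks */

		/* Plain division gives 0, so a brand-new thread with zero
		 * accumulated CPU time ends up with now + 0 == 0, which is
		 * read as "not set" and the signal is never delivered. */
		printf("plain share:   %lu\n", left / nthreads);

		/* Clamped division gives at least 1, so every thread's
		 * expiry time advances and delivery makes progress. */
		printf("clamped share: %lu\n", div_non_zero(left, nthreads));

		return 0;
	}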
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--	kernel/posix-cpu-timers.c	27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 479b16b44f79..7c3e1e6dfb5b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -88,6 +88,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
 }
 
 /*
+ * Divide and limit the result to res >= 1
+ *
+ * This is necessary to prevent signal delivery starvation, when the result of
+ * the division would be rounded down to 0.
+ */
+static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
+{
+	cputime_t res = cputime_div(time, div);
+
+	return max_t(cputime_t, res, 1);
+}
+
+/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
@@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		BUG();
 		break;
 	case CPUCLOCK_PROF:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(prof_ticks(t), left);
@@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		} while (t != p);
 		break;
 	case CPUCLOCK_VIRT:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(virt_ticks(t), left);
@@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p,
 	case CPUCLOCK_SCHED:
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
+		nsleft = max_t(unsigned long long, nsleft, 1);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ns = t->sched_time + nsleft;
@@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk,
 
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
-		prof_left = cputime_div(prof_left, nthreads);
+		prof_left = cputime_div_non_zero(prof_left, nthreads);
 		virt_left = cputime_sub(virt_expires, utime);
-		virt_left = cputime_div(virt_left, nthreads);
+		virt_left = cputime_div_non_zero(virt_left, nthreads);
 		if (sched_expires) {
 			sched_left = sched_expires - sched_time;
 			do_div(sched_left, nthreads);
+			sched_left = max_t(unsigned long long, sched_left, 1);
 		} else {
 			sched_left = 0;
 		}
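As a side note, both SCHED-clock hunks rely on the kernel's max_t() macro to clamp the 64-bit quotient left behind by do_div(). A rough standalone approximation of what that clamp does (the real kernel macro performs stricter type handling and avoids pitfalls this simplified version ignores):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's max_t(): compare after casting
	 * both operands to the requested type. Unlike the real macro, this
	 * sketch does not guard against double evaluation of its arguments. */
	#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		unsigned long long nsleft = 0;	/* a quotient that rounded down to 0 */

		nsleft = max_t(unsigned long long, nsleft, 1);
		printf("nsleft = %llu\n", nsleft);	/* prints 1 */
		return 0;
	}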