author	Stanislaw Gruszka <sgruszka@redhat.com>	2010-03-11 17:04:42 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2010-03-12 16:40:41 -0500
commit	c28739375bf0d6e239b4fa939ec8372aa2c707d2 (patch)
tree	9d213503878c59b7adc9ca3e0517c566921f3c9a /kernel
parent	1f169f84d25a74fb2dc67274d31d082ce30c60fb (diff)
cpu-timers: Avoid iterating over all threads in fastpath_timer_check()
Spread the p->sighand->siglock locking scope to make sure that fastpath_timer_check() never iterates over all threads. Without the locking there is a small possibility that signal->cputimer will stop running while we write values to signal->cputime_expires.

Calling thread_group_cputime() from fastpath_timer_check() is not only bad because it is slow; it is also racy with __exit_signal(), which can lead to invalid signal->{s,u}time values.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
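The locking pattern is easier to see outside the kernel. Below is a minimal user-space sketch of the same idea, assuming a pthread mutex in place of p->sighand->siglock and made-up names (group_stats, sample_and_arm, fast_check); it illustrates widening the caller's lock scope so the expiry fields are never observed half-written, and is not the kernel code itself. (The real fastpath_timer_check() reads these fields locklessly; the reader-side lock here only keeps the sketch simple.)

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for signal->cputime_expires and cputimer state. */
struct group_stats {
	pthread_mutex_t lock;        /* plays the role of p->sighand->siglock      */
	bool timer_running;          /* plays the role of signal->cputimer.running */
	unsigned long long expires;  /* plays the role of signal->cputime_expires  */
};

/*
 * The caller holds the lock across the whole "start the timer and publish
 * the expiry" sequence, instead of each helper locking around only its own
 * write.  A concurrent reader therefore sees either both updates or neither.
 */
static void sample_and_arm(struct group_stats *gs, unsigned long long expires)
{
	pthread_mutex_lock(&gs->lock);
	gs->timer_running = true;    /* analogous to starting the group cputimer  */
	gs->expires = expires;       /* analogous to writing cputime_expires      */
	pthread_mutex_unlock(&gs->lock);
}

/* Cheap fast-path test: a couple of field reads, no walk over all threads. */
static bool fast_check(struct group_stats *gs, unsigned long long now)
{
	bool fire;

	pthread_mutex_lock(&gs->lock);
	fire = gs->timer_running && gs->expires != 0 && now >= gs->expires;
	pthread_mutex_unlock(&gs->lock);
	return fire;
}

int main(void)
{
	struct group_stats gs = { .lock = PTHREAD_MUTEX_INITIALIZER };

	sample_and_arm(&gs, 100);
	printf("expired at now=150? %s\n", fast_check(&gs, 150) ? "yes" : "no");
	return 0;
}

Build with "gcc -pthread sketch.c". The only point is the lock scope: once the writer's critical section covers both fields, the fast path can never see the timer running with a stale or half-written expiry, which is what the patch arranges for signal->cputime_expires.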
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 7d9d0fab1651..564b3b0240dd 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -550,7 +550,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later. This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -569,9 +569,6 @@ static void arm_timer(struct k_itimer *timer)
 	}
 	head += CPUCLOCK_WHICH(timer->it_clock);
 
-	BUG_ON(!irqs_disabled());
-	spin_lock(&p->sighand->siglock);
-
 	listpos = head;
 	list_for_each_entry(next, head, entry) {
 		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
@@ -606,8 +603,6 @@ static void arm_timer(struct k_itimer *timer)
 			break;
 		}
 	}
-
-	spin_unlock(&p->sighand->siglock);
 }
 
 /*
@@ -720,7 +715,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		ret = TIMER_RETRY;
 	} else
 		list_del_init(&timer->it.cpu.entry);
-	spin_unlock(&p->sighand->siglock);
 
 	/*
 	 * We need to sample the current value to convert the new
@@ -774,6 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
+		spin_unlock(&p->sighand->siglock);
 		read_unlock(&tasklist_lock);
 		goto out;
 	}
@@ -793,6 +788,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
+	spin_unlock(&p->sighand->siglock);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -1206,6 +1202,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			goto out;
 		}
 		read_lock(&tasklist_lock); /* arm_timer needs it. */
+		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
 		if (unlikely(p->signal == NULL)) {
@@ -1226,6 +1223,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
+		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
@@ -1234,7 +1232,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
+	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
+	spin_unlock(&p->sighand->siglock);
 
 out_unlock:
 	read_unlock(&tasklist_lock);