author	Suresh Siddha <suresh.b.siddha@intel.com>	2011-10-03 18:09:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-10-06 06:46:23 -0400
commit	ca38062e57e97791c2f62e3dbd06caf3ebb5721c (patch)
tree	c022f6a3d3087cc8eaebbc5640ca145f7d9c50d8 /kernel
parent	9243a169acb9df9c63632fb7d5464359a107877a (diff)
sched: Use resched IPI to kick off the nohz idle balance
Current use of smp call function to kick the nohz idle balance can
deadlock in this scenario:

 1. cpu-A did a generic_exec_single() to cpu-B and, after queuing its
    call single data (csd) to the call single queue, cpu-A took a
    timer interrupt. The actual IPI to cpu-B to process the call
    single queue is not yet sent.

 2. As part of the timer interrupt handler, cpu-A decided to kick
    cpu-B for the idle load balancing (it sets cpu-B's
    rq->nohz_balance_kick to 1) and __smp_call_function_single() with
    nowait will queue the csd to cpu-B's queue. But
    generic_exec_single() won't send an IPI to cpu-B, as the call
    single queue was not empty.

 3. cpu-A is busy with a lot of interrupts.

 4. Meanwhile cpu-B is entering and exiting idle, and notices that its
    rq->nohz_balance_kick is set to '1'. So it goes ahead and runs the
    idle load balancer and clears its rq->nohz_balance_kick.

 5. At this point, the csd queued in step 2 above is still locked and
    waiting to be serviced on cpu-B.

 6. cpu-A is still busy with interrupt load, gets another timer
    interrupt, and as part of it decides to kick cpu-B for another
    idle load balancing (as it finds cpu-B's rq->nohz_balance_kick
    cleared in step 4 above) and does __smp_call_function_single()
    with the same csd that is still locked.

 7. We get a deadlock waiting for the csd_lock() in
    __smp_call_function_single().

The main issue here is that cpu-B can service the idle load balancer
kick request from cpu-A even without receiving the IPI, and this leads
to doing multiple __smp_call_function_single() calls on the same csd,
which deadlocks.

To kick a cpu, the scheduler already has the reschedule vector
reserved. Use that mechanism (kick_process()) instead of the generic
smp call function mechanism to kick off the nohz idle load balancing
and avoid the deadlock.

[ This issue is present from 2.6.35+ kernels, but marking it -stable
  only from v3.0+ as the proposed fix depends on the scheduler_ipi()
  that was introduced recently. ]

Reported-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: stable@kernel.org # v3.0+
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20111003220934.834943260@sbsiddha-desk.sc.intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
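To see why step 7 hangs: __smp_call_function_single() must wait for the
csd's lock flag to clear before the csd can be reused, and only the
target cpu's call-single servicing clears it. Below is a minimal
userspace sketch of that interaction (not kernel code: the struct, the
single lock bit, and csd_lock() here are simplifications of the
kernel/smp.c helpers):

	#include <stdatomic.h>
	#include <stdio.h>

	#define CSD_FLAG_LOCK 0x01

	struct csd_sim {
		atomic_uint flags;	/* only the lock bit is modelled */
	};

	/* Simplified csd_lock(): spin until the previous user of the csd
	 * has been serviced by the target cpu, then take ownership. */
	static void csd_lock(struct csd_sim *csd)
	{
		while (atomic_load(&csd->flags) & CSD_FLAG_LOCK)
			;		/* step 7: cpu-A spins here forever */
		atomic_fetch_or(&csd->flags, CSD_FLAG_LOCK);
	}

	int main(void)
	{
		struct csd_sim csd = { 0 };

		csd_lock(&csd);	/* step 2: csd queued for cpu-B, no IPI sent */
		/* steps 4-5: cpu-B balances via rq->nohz_balance_kick alone
		 * and never services the queued csd, so the lock bit stays
		 * set. */
		csd_lock(&csd);	/* step 6: same csd reused -> never returns */
		puts("never reached");
		return 0;
	}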
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	21
-rw-r--r--	kernel/sched_fair.c	29
2 files changed, 28 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4cdc91cf48f6..9e49af00ae3e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1404,6 +1404,18 @@ void wake_up_idle_cpu(int cpu)
 		smp_send_reschedule(cpu);
 }
 
+static inline bool got_nohz_idle_kick(void)
+{
+	return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
+}
+
+#else /* CONFIG_NO_HZ */
+
+static inline bool got_nohz_idle_kick(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_NO_HZ */
 
 static u64 sched_avg_period(void)
@@ -2717,7 +2729,7 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list))
+	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -2735,6 +2747,12 @@ void scheduler_ipi(void)
 	 */
 	irq_enter();
 	sched_ttwu_pending();
+
+	/*
+	 * Check if someone kicked us for doing the nohz idle load balance.
+	 */
+	if (unlikely(got_nohz_idle_kick() && !need_resched()))
+		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	irq_exit();
 }
 
@@ -8288,7 +8306,6 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ
 		rq->nohz_balance_kick = 0;
-		init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
 #endif
 #endif
 		init_rq_hrtick(rq);
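
Taken together, the sched.c hunks above make the reschedule IPI handler
the only consumer of the kick: the flag is re-checked inside
scheduler_ipi() and the balance work is punted to SCHED_SOFTIRQ. A
sketch of the resulting function, reconstructed from the hunks (the
surrounding comments in the real kernel/sched.c are elided):

	void scheduler_ipi(void)
	{
		/* Cheap exit: no pending remote wakeups and no nohz kick. */
		if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
			return;

		irq_enter();
		sched_ttwu_pending();	/* process remote wakeups, as before */

		/*
		 * Check if someone kicked us for doing the nohz idle load balance.
		 */
		if (unlikely(got_nohz_idle_kick() && !need_resched()))
			raise_softirq_irqoff(SCHED_SOFTIRQ);
		irq_exit();
	}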
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fef0bfde7c8c..6c5fa1099229 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -4269,22 +4269,6 @@ out_unlock:
 }
 
 #ifdef CONFIG_NO_HZ
-
-static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
-
-static void trigger_sched_softirq(void *data)
-{
-	raise_softirq_irqoff(SCHED_SOFTIRQ);
-}
-
-static inline void init_sched_softirq_csd(struct call_single_data *csd)
-{
-	csd->func = trigger_sched_softirq;
-	csd->info = NULL;
-	csd->flags = 0;
-	csd->priv = 0;
-}
-
 /*
  * idle load balancing details
  * - One of the idle CPUs nominates itself as idle load_balancer, while
@@ -4450,11 +4434,16 @@ static void nohz_balancer_kick(int cpu)
 	}
 
 	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-		struct call_single_data *cp;
-
 		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-		cp = &per_cpu(remote_sched_softirq_cb, cpu);
-		__smp_call_function_single(ilb_cpu, cp, 0);
+
+		smp_mb();
+		/*
+		 * Use smp_send_reschedule() instead of resched_cpu().
+		 * This way we generate a sched IPI on the target cpu which
+		 * is idle. And the softirq performing nohz idle load balance
+		 * will be run before returning from the IPI.
+		 */
+		smp_send_reschedule(ilb_cpu);
 	}
 	return;
 }
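
The kick side now pairs the store to rq->nohz_balance_kick with the
reschedule IPI, and the target acts on the flag only from inside the
IPI handler, so a kick can no longer be consumed while a request is
still in flight. A userspace analogue of that sender/receiver pairing,
with pthreads standing in for the IPI (all names here are illustrative;
none of this is kernel API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_bool balance_kick;	/* ~ rq->nohz_balance_kick */
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t ipi = PTHREAD_COND_INITIALIZER; /* ~ resched IPI */
	static int ipi_pending;

	/* nohz_balancer_kick() analogue: set the flag first, then send
	 * the "IPI"; the seq-cst store plays the role of the smp_mb(). */
	static void kick(void)
	{
		atomic_store(&balance_kick, true);
		pthread_mutex_lock(&m);
		ipi_pending = 1;
		pthread_cond_signal(&ipi);
		pthread_mutex_unlock(&m);
	}

	/* scheduler_ipi() analogue on the idle cpu: the kick is consumed
	 * only here, inside the "IPI" - never from the idle loop on its
	 * own, which is exactly what closes the deadlock window. */
	static void *idle_cpu_thread(void *arg)
	{
		pthread_mutex_lock(&m);
		while (!ipi_pending)
			pthread_cond_wait(&ipi, &m);
		pthread_mutex_unlock(&m);

		if (atomic_exchange(&balance_kick, false))
			puts("nohz idle balance (SCHED_SOFTIRQ analogue)");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, idle_cpu_thread, NULL);
		kick();
		pthread_join(t, NULL);
		return 0;	/* build with: cc -pthread */
	}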