Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c        | 53
-rw-r--r--  kernel/rcutree.h        | 12
-rw-r--r--  kernel/rcutree_plugin.h |  4
3 files changed, 40 insertions, 29 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d5597830faf5..e2e272b5c277 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -462,8 +462,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	long delta;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
 
 	/* Only let one CPU complain about others per time interval. */
 
@@ -474,18 +472,24 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 		return;
 	}
 	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+
+	/*
+	 * Now rat on any tasks that got kicked up to the root rcu_node
+	 * due to CPU offlining.
+	 */
+	rcu_print_task_stall(rnp);
 	spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_print_task_stall(rnp);
-		if (rnp_cur->qsmask == 0)
+		if (rnp->qsmask == 0)
 			continue;
-		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
-			if (rnp_cur->qsmask & (1UL << cpu))
-				printk(" %d", rnp_cur->grplo + cpu);
+		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			if (rnp->qsmask & (1UL << cpu))
+				printk(" %d", rnp->grplo + cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -649,7 +653,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * one corresponding to this CPU, due to the fact that we have
 	 * irqs disabled.
 	 */
-	for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
+	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
@@ -1042,33 +1046,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+	struct rcu_node *rnp;
 
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
-		spin_lock_irqsave(&rnp_cur->lock, flags);
+		spin_lock_irqsave(&rnp->lock, flags);
 		if (rsp->completed != lastcomp) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
-		if (rnp_cur->qsmask == 0) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		if (rnp->qsmask == 0) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;
 		}
-		cpu = rnp_cur->grplo;
+		cpu = rnp->grplo;
 		bit = 1;
-		for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
-			if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
 		if (mask != 0 && rsp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp_cur->lock. */
-			cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+			/* cpu_quiet_msk() releases rnp->lock. */
+			cpu_quiet_msk(mask, rsp, rnp, flags);
 			continue;
 		}
-		spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	return 0;
 }
@@ -1550,6 +1553,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
  */
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	int i; \
+	int j; \
+	struct rcu_node *rnp; \
+	\
 	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
@@ -1564,10 +1571,6 @@ do { \
 
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index e6ab31cc28ba..676eecd371d9 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -106,6 +106,18 @@ struct rcu_node {
 				/*  blocked_tasks[] array. */
 } ____cacheline_internodealigned_in_smp;
 
+/*
+ * Do a full breadth-first scan of the rcu_node structures for the
+ * specified rcu_state structure.
+ */
+#define rcu_for_each_node_breadth_first(rsp, rnp) \
+	for ((rnp) = &(rsp)->node[0]; \
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+
+#define rcu_for_each_leaf_node(rsp, rnp) \
+	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
 #define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 65250219ab6d..57200fe96d0a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -423,10 +423,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
  */
 static void __init __rcu_init_preempt(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
 }
 
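
Note on the new iterators: they rely on rsp->node[] storing the rcu_node tree breadth-first, with the leaf level occupying the tail of the array, so a full scan starts at node[0] and a leaf-only scan starts at level[NUM_RCU_LVLS - 1]. The standalone userspace sketch below only illustrates that traversal order; toy_state, toy_node, and the two-level tree (one root plus two leaves) are invented for illustration and are not the kernel's actual structures, though the two macros are copied verbatim from the patch.

	#include <stdio.h>

	#define NUM_RCU_LVLS	2
	#define NUM_RCU_NODES	3	/* node[0] = root, node[1..2] = leaves */

	struct toy_node {
		int grplo, grphi;		/* CPU range covered by this node */
	};

	struct toy_state {
		struct toy_node node[NUM_RCU_NODES];	/* breadth-first layout */
		struct toy_node *level[NUM_RCU_LVLS];	/* first node of each level */
	};

	/* Same shape as the macros added to kernel/rcutree.h. */
	#define rcu_for_each_node_breadth_first(rsp, rnp) \
		for ((rnp) = &(rsp)->node[0]; \
		     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

	#define rcu_for_each_leaf_node(rsp, rnp) \
		for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
		     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

	int main(void)
	{
		struct toy_state s = {
			.node = { { 0, 3 }, { 0, 1 }, { 2, 3 } },
		};
		struct toy_node *rnp;

		s.level[0] = &s.node[0];	/* root level */
		s.level[1] = &s.node[1];	/* leaf level starts here */

		/* Visits root then both leaves: CPUs 0-3, 0-1, 2-3. */
		rcu_for_each_node_breadth_first(&s, rnp)
			printf("all:  CPUs %d-%d\n", rnp->grplo, rnp->grphi);

		/* Visits only the leaves: CPUs 0-1, 2-3. */
		rcu_for_each_leaf_node(&s, rnp)
			printf("leaf: CPUs %d-%d\n", rnp->grplo, rnp->grphi);
		return 0;
	}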