author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2009-08-27 18:00:12 -0400
committer Ingo Molnar <mingo@elte.hu>                    2009-08-29 09:34:40 -0400
commit    868489660dabc0c28087cca3dbc1adbbc398c6fe
tree      cf991ec94ce29bccfef27213107748810c51a1ca
parent    dd5d19bafd90d33043a4a14b2e2d98612caa293c

rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
Changes suggested by review comments from Josh Triplett and Mathieu
Desnoyers.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <20090827220012.GA30525@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
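
The substantive change in kernel/rcutree.c is the switch from WARN_ON_RATELIMIT() to WARN_ON_ONCE(): a dynticks counter that has lost its parity stays broken, so a single report presumably carries as much information as a rate-limited stream, and the static rcu_rs state can be dropped. A minimal userspace sketch of the once-only semantics (illustrative macro, not the kernel's <asm-generic/bug.h> implementation):

#include <stdio.h>

/* Approximation of WARN_ON_ONCE(): fire at most once per call site. */
#define warn_on_once(cond) do {                                 \
        static int warned;                                      \
        if ((cond) && !warned) {                                \
                warned = 1;                                     \
                fprintf(stderr, "WARNING (once): %s\n", #cond); \
        }                                                       \
} while (0)

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                warn_on_once(i >= 0);   /* always true, but warns only once */
        return 0;
}

By contrast, a ratelimited warning would keep firing (up to a burst per interval) for as long as the condition recurs.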
 include/linux/sched.h   |  4 +++-
 kernel/rcutree.c        | 13 ++++++-------
 kernel/rcutree.h        |  2 ++
 kernel/rcutree_plugin.h | 10 ++++++----
 4 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3fe03151a8e6..855fd0d3f174 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
         void *stack;
@@ -1208,7 +1210,7 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
         int rcu_read_lock_nesting;
         char rcu_read_unlock_special;
-        void *rcu_blocked_node;
+        struct rcu_node *rcu_blocked_node;
         struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
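The hunk above works because C allows pointers to incomplete types: the one-line forward declaration lets task_struct carry a properly typed rcu_blocked_node without sched.h pulling in the rcutree definitions, which is what removes the (void *) casts in kernel/rcutree_plugin.h below. A minimal sketch (illustrative names, not the kernel sources):

/* A pointer to an incomplete type is legal C, so the forward
 * declaration alone is enough to type the struct member. */
struct rcu_node;                        /* incomplete ("opaque") type */

struct task_struct_like {               /* illustrative, not the kernel's */
        struct rcu_node *rcu_blocked_node;  /* OK without full definition */
};

/* Only code that dereferences the pointer, or takes
 * sizeof(struct rcu_node), needs the complete definition. */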
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d903e2f2b840..71bc79791cd9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -229,7 +229,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -249,7 +248,7 @@ void rcu_enter_nohz(void)
         rdtp = &__get_cpu_var(rcu_dynticks);
         rdtp->dynticks++;
         rdtp->dynticks_nesting--;
-        WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks & 0x1);
         local_irq_restore(flags);
 }
 
@@ -268,7 +267,7 @@ void rcu_exit_nohz(void)
         rdtp = &__get_cpu_var(rcu_dynticks);
         rdtp->dynticks++;
         rdtp->dynticks_nesting++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
         local_irq_restore(flags);
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -287,7 +286,7 @@ void rcu_nmi_enter(void)
         if (rdtp->dynticks & 0x1)
                 return;
         rdtp->dynticks_nmi++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -306,7 +305,7 @@ void rcu_nmi_exit(void)
                 return;
         smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         rdtp->dynticks_nmi++;
-        WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -322,7 +321,7 @@ void rcu_irq_enter(void)
         if (rdtp->dynticks_nesting++)
                 return;
         rdtp->dynticks++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -341,7 +340,7 @@ void rcu_irq_exit(void)
                 return;
         smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         rdtp->dynticks++;
-        WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
         /* If the interrupt queued a callback, get out of dyntick mode. */
         if (__get_cpu_var(rcu_sched_data).nxtlist ||
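All six warnings check the same parity invariant: ->dynticks (or ->dynticks_nmi) is incremented on every transition, so the counter is odd while the CPU is active and even while it sits in dynticks-idle mode. A hedged standalone sketch of that invariant, with illustrative names and assert() standing in for WARN_ON_ONCE():

#include <assert.h>

struct dynticks_like {
        long dynticks;          /* even: idle (nohz); odd: active */
};

static void enter_nohz(struct dynticks_like *d)
{
        d->dynticks++;
        assert(!(d->dynticks & 0x1));   /* must now be even */
}

static void exit_nohz(struct dynticks_like *d)
{
        d->dynticks++;
        assert(d->dynticks & 0x1);      /* must now be odd */
}

int main(void)
{
        struct dynticks_like d = { .dynticks = 1 }; /* boots active (odd) */

        enter_nohz(&d);         /* 1 -> 2: even, idle */
        exit_nohz(&d);          /* 2 -> 3: odd, active again */
        return 0;
}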
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index ca560364d8cd..bf8a6f9f134d 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -81,6 +81,8 @@ struct rcu_dynticks {
 struct rcu_node {
         spinlock_t lock;
         long gpnum;             /* Current grace period for this node. */
+                                /*  This will either be equal to or one */
+                                /*  behind the root rcu_node's gpnum. */
         unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                 /*  order for current grace period to proceed.*/
         unsigned long qsmaskinit;
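The comment added above records an invariant rather than changing behavior: grace periods propagate from the root rcu_node downward, so a node's gpnum either equals the root's or trails it by exactly one. Expressed as a check (illustrative code, not part of the patch):

#include <assert.h>

/* Invariant from the new comment: a non-root node's gpnum matches the
 * root's, or lags by one while the new GP is still propagating down. */
static void check_gpnum(long node_gpnum, long root_gpnum)
{
        assert(node_gpnum == root_gpnum || node_gpnum + 1 == root_gpnum);
}

int main(void)
{
        check_gpnum(42, 42);    /* fully propagated */
        check_gpnum(41, 42);    /* root started GP 42, node not yet caught up */
        return 0;
}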
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 04343bee646d..47789369ea59 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -92,7 +92,7 @@ static void rcu_preempt_qs(int cpu)
                 rnp = rdp->mynode;
                 spin_lock(&rnp->lock);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
-                t->rcu_blocked_node = (void *)rnp;
+                t->rcu_blocked_node = rnp;
 
                 /*
                  * If this CPU has already checked in, then this task
@@ -176,9 +176,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  * most one time.  So at most two passes through loop.
                  */
                 for (;;) {
-                        rnp = (struct rcu_node *)t->rcu_blocked_node;
+                        rnp = t->rcu_blocked_node;
                         spin_lock(&rnp->lock);
-                        if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+                        if (rnp == t->rcu_blocked_node)
                                 break;
                         spin_unlock(&rnp->lock);
                 }
@@ -288,8 +288,10 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
         struct rcu_node *rnp_root = rcu_get_root(rsp);
         struct task_struct *tp;
 
-        if (rnp == rnp_root)
+        if (rnp == rnp_root) {
+                WARN_ONCE(1, "Last CPU thought to be offlined?");
                 return;  /* Shouldn't happen: at least one CPU online. */
+        }
 
         /*
          * Move tasks up to root rcu_node.  Rely on the fact that the
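
The for (;;) loop simplified in the rcu_read_unlock_special() hunk above is a lock/recheck idiom: the rcu_node a task is queued on may change while this CPU waits for the lock, so the pointer is re-read after acquisition and the sequence retried until it is stable. A pthreads sketch of the same shape (illustrative types and names, not the kernel's):

#include <pthread.h>

struct node_like {
        pthread_mutex_t lock;
};

struct task_like {
        struct node_like *blocked_node; /* may be changed concurrently */
};

/* Lock the node the task is currently queued on, retrying if the task
 * was moved to another node while we slept on the lock.  The task can
 * move at most once here, so the loop runs at most twice. */
static struct node_like *lock_blocked_node(struct task_like *t)
{
        struct node_like *np;

        for (;;) {
                np = t->blocked_node;           /* snapshot before locking */
                pthread_mutex_lock(&np->lock);
                if (np == t->blocked_node)      /* still the right node? */
                        return np;              /* caller unlocks np->lock */
                pthread_mutex_unlock(&np->lock);
        }
}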