| author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-08-27 18:00:12 -0400 |
|-----------|-----------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@elte.hu>                   | 2009-08-29 09:34:40 -0400 |
| commit    | 868489660dabc0c28087cca3dbc1adbbc398c6fe      |                           |
| tree      | cf991ec94ce29bccfef27213107748810c51a1ca      | /kernel/rcutree_plugin.h  |
| parent    | dd5d19bafd90d33043a4a14b2e2d98612caa293c      |                           |
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
Changes suggested by review comments from Josh Triplett and
Mathieu Desnoyers.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <20090827220012.GA30525@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
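The "avoid casts" part of this change only compiles cleanly if task_struct's rcu_blocked_node field carries its real pointer type rather than void *; that declaration lives outside this diffstat-limited view, so the sketch below is an illustration, not the kernel's actual header. It shows the pattern the hunks rely on: with the field typed as struct rcu_node * (a forward declaration is enough), the assignment in rcu_preempt_qs() and the reads in rcu_read_unlock_special() need no casts.

```c
/* Illustrative fragment only -- not the kernel's task_struct. */
struct rcu_node;                           /* incomplete type suffices for a pointer member */

struct task_struct_fragment {              /* hypothetical stand-in for task_struct */
	unsigned int rcu_read_unlock_special;
	struct rcu_node *rcu_blocked_node;     /* typed pointer: no (void *) or (struct rcu_node *) casts needed */
};
```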
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 04343bee646d..47789369ea59 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -92,7 +92,7 @@ static void rcu_preempt_qs(int cpu)
 		rnp = rdp->mynode;
 		spin_lock(&rnp->lock);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
-		t->rcu_blocked_node = (void *)rnp;
+		t->rcu_blocked_node = rnp;
 
 		/*
 		 * If this CPU has already checked in, then this task
@@ -176,9 +176,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		 * most one time.  So at most two passes through loop.
 		 */
 		for (;;) {
-			rnp = (struct rcu_node *)t->rcu_blocked_node;
+			rnp = t->rcu_blocked_node;
 			spin_lock(&rnp->lock);
-			if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+			if (rnp == t->rcu_blocked_node)
 				break;
 			spin_unlock(&rnp->lock);
 		}
@@ -288,8 +288,10 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
-	if (rnp == rnp_root)
+	if (rnp == rnp_root) {
+		WARN_ONCE(1, "Last CPU thought to be offlined?");
 		return;  /* Shouldn't happen: at least one CPU online. */
+	}
 
 	/*
 	 * Move tasks up to root rcu_node.  Rely on the fact that the
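The added WARN_ONCE() flags the "shouldn't happen" offline path with a log message (and, in the kernel, a stack trace) the first time the condition is hit, then stays quiet on later hits, so a broken assumption is visible without flooding the log. A rough userspace analogue of that warn-once behaviour, written as a plain C sketch rather than the kernel macro, is:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace sketch of the warn-once idea: report the first time the
 * condition is true, stay silent afterwards, and return the condition
 * so callers can branch on it.  The real WARN_ONCE() is a macro, so
 * each call site gets its own one-shot flag, and it also dumps a
 * stack trace; this sketch keeps a single flag in one helper.
 */
static bool sketch_warn_once(bool cond, const char *msg)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "WARNING: %s\n", msg);
	}
	return cond;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (sketch_warn_once(true, "Last CPU thought to be offlined?"))
			continue;	/* message is printed only on the first pass */
	return 0;
}
```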
