author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-09-23 12:50:41 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-09-23 13:46:29 -0400
commit		fc2219d49ef1606e7fd2c88af2b423b01ff3d319 (patch)
tree		1c1cba7e4334898a98cf0e2c992e8cfef4b34979 /kernel
parent		0729e196147692d84d4c099fcff056eba2ed61d8 (diff)
rcu: Clean up code based on review feedback from Josh Triplett
These issues were identified during an old-fashioned face-to-face code
review that extended over many hours:

o	Bury various forms of the "rsp->completed == rsp->gpnum"
	comparison into an rcu_gp_in_progress() function, which has the
	beneficial side effect of forcing consistent use of ACCESS_ONCE().

o	Replace hand-coded arithmetic with DIV_ROUND_UP().

o	Bury several "!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x01])"
	instances into an rcu_preempted_readers() function, as this
	expression indicates that there are readers blocked within RCU
	read-side critical sections blocking the current grace period.
	(Though there might well be similar readers blocking the next
	grace period.)

o	Remove a dangling rcu_restart_cpu() declaration that has been
	dangling for almost 20 minor releases of the kernel.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12537246442687-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
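The new rcu_gp_in_progress() helper leans on the kernel's ACCESS_ONCE()
macro, which forces the compiler to emit exactly one real load per field
rather than re-reading or caching the values. Below is a minimal
user-space sketch of the idiom, not kernel code: the two-field struct,
the local ACCESS_ONCE() stand-in, and the demonstration values are all
illustrative.

#include <stdio.h>

/* User-space stand-in for the kernel's ACCESS_ONCE() from compiler.h. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct rcu_state {
	long completed;		/* number of last completed grace period */
	long gpnum;		/* number of current grace period */
};

/* Unequal snapshots of the two counters mean a grace period is in flight. */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}

int main(void)
{
	struct rcu_state rs = { .completed = 41, .gpnum = 42 };

	printf("in progress: %d\n", rcu_gp_in_progress(&rs));	/* prints 1 */
	rs.completed = rs.gpnum;	/* grace period completes */
	printf("in progress: %d\n", rcu_gp_in_progress(&rs));	/* prints 0 */
	return 0;
}

Because the two loads are not atomic as a pair, the answer can be stale
by the time the caller acts on it, which is exactly the caveat the new
comment in the patch spells out.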
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutree.c		29
-rw-r--r--	kernel/rcutree.h		 6
-rw-r--r--	kernel/rcutree_plugin.h		50
3 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158c..f85b6842d1e1 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -101,6 +101,16 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
 #include "rcutree_plugin.h"
 
 /*
+ * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * permit this function to be invoked without holding the root rcu_node
+ * structure's ->lock, but of course results can be subject to change.
+ */
+static int rcu_gp_in_progress(struct rcu_state *rsp)
+{
+	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+}
+
+/*
  * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
  * one since the start of the grace period, this just sets a flag.
@@ -173,9 +183,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* ACCESS_ONCE() because we are accessing outside of lock. */
-	return *rdp->nxttail[RCU_DONE_TAIL] &&
-	       ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
 }
 
 /*
@@ -482,7 +490,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 	spin_lock_irqsave(&rnp->lock, flags);
 	delta = jiffies - rsp->jiffies_stall;
-	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
@@ -537,8 +545,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rsp->gpnum != rsp->completed &&
-		   delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
 
 		/* They had two time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
@@ -703,9 +710,9 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  * hold rnp->lock, as required by rcu_start_gp(), which will release it.
  */
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
-	__releases(rnp->lock)
+	__releases(rcu_get_root(rsp)->lock)
 {
-	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
+	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
@@ -1092,7 +1099,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
 	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
@@ -1251,7 +1258,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	/* Start a new grace period if one not already started. */
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+	if (!rcu_gp_in_progress(rsp)) {
 		unsigned long nestflag;
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
@@ -1331,7 +1338,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+	if (rcu_gp_in_progress(rsp) &&
 	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 8e8287a983c2..9aa8c8a160d8 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -48,14 +48,14 @@
 #elif NR_CPUS <= RCU_FANOUT_SQ
 # define NUM_RCU_LVLS	2
 # define NUM_RCU_LVL_0	1
-# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
 # define NUM_RCU_LVL_2	(NR_CPUS)
 # define NUM_RCU_LVL_3	0
 #elif NR_CPUS <= RCU_FANOUT_CUBE
 # define NUM_RCU_LVLS	3
 # define NUM_RCU_LVL_0	1
-# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-# define NUM_RCU_LVL_2	(((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
 # define NUM_RCU_LVL_3	NR_CPUS
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
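For reference, DIV_ROUND_UP() is defined in include/linux/kernel.h and
expands to the same round-up arithmetic the deleted macros spelled out
by hand. A stand-alone check with illustrative values (a fanout of 64 is
the usual 64-bit default, but any positive divisor behaves the same):

#include <assert.h>
#include <stdio.h>

/* As defined in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 70 CPUs at a fanout of 64 need two level-1 nodes... */
	assert(DIV_ROUND_UP(70, 64) == 2);
	/* ...and exact multiples do not round up an extra node. */
	assert(DIV_ROUND_UP(64, 64) == 1);
	/* Identical to the hand-coded form being replaced. */
	assert(DIV_ROUND_UP(70, 64) == ((70 + 64 - 1) / 64));
	printf("all checks passed\n");
	return 0;
}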
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 1cee04f627eb..8ff1ba7b3c43 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -150,6 +150,16 @@ void __rcu_read_lock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
 
+/*
+ * Check for preempted RCU readers blocking the current grace period
+ * for the specified rcu_node structure.  If the caller needs a reliable
+ * answer, it must hold the rcu_node's ->lock.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+}
+
 static void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
@@ -196,7 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			break;
 		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
-	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+	empty = !rcu_preempted_readers(rnp);
 	list_del_init(&t->rcu_node_entry);
 	t->rcu_blocked_node = NULL;
 
@@ -207,7 +217,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 * drop rnp->lock and restore irq.
 	 */
 	if (!empty && rnp->qsmask == 0 &&
-	    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
+	    !rcu_preempted_readers(rnp)) {
 		struct rcu_node *rnp_p;
 
 		if (rnp->parent == NULL) {
@@ -257,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 {
 	unsigned long flags;
 	struct list_head *lp;
-	int phase = rnp->gpnum & 0x1;
+	int phase;
 	struct task_struct *t;
 
-	if (!list_empty(&rnp->blocked_tasks[phase])) {
+	if (rcu_preempted_readers(rnp)) {
 		spin_lock_irqsave(&rnp->lock, flags);
-		phase = rnp->gpnum & 0x1; /* re-read under lock. */
+		phase = rnp->gpnum & 0x1;
 		lp = &rnp->blocked_tasks[phase];
 		list_for_each_entry(t, lp, rcu_node_entry)
 			printk(" P%d", t->pid);
@@ -281,20 +291,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
-	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rcu_preempted_readers(rnp));
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Check for preempted RCU readers for the specified rcu_node structure.
- * If the caller needs a reliable answer, it must hold the rcu_node's
- * ->lock.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
@@ -461,6 +461,15 @@ static void rcu_preempt_note_context_switch(int cpu)
 {
 }
 
+/*
+ * Because preemptable RCU does not exist, there are never any preempted
+ * RCU readers.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return 0;
+}
+
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
 /*
@@ -483,15 +492,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Because preemptable RCU does not exist, there are never any preempted
- * RCU readers.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*