path: root/kernel/rcutree.c
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2009-09-23 12:50:42 -0400
committer Ingo Molnar <mingo@elte.hu>                      2009-09-23 13:46:29 -0400
commit    1eba8f84380bede3c602bd7758dea96925cead01 (patch)
tree      32dd97671016c9fdc24d75be50cc8540e6cc0730 /kernel/rcutree.c
parent    fc2219d49ef1606e7fd2c88af2b423b01ff3d319 (diff)
rcu: Clean up code based on review feedback from Josh Triplett, part 2
These issues were identified during an old-fashioned face-to-face code
review extending over many hours.

o	Add comments for tricky parts of code, and correct comments
	that have passed their sell-by date.

o	Get rid of the vestiges of rcu_init_sched(), which is no
	longer needed now that PREEMPT_RCU is gone.

o	Move the #include of rcutree_plugin.h to the end of
	rcutree.c, which means that, rather than having a random
	collection of forward declarations, the new set of forward
	declarations document the set of plugins.  The new home for
	this #include also allows __rcu_init_preempt() to move into
	rcutree_plugin.h.

o	Fix rcu_preempt_check_callbacks() to be static.

Suggested-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12537246443924-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
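The third bullet above describes the layout this commit gives rcutree.c: the plugin interface is forward-declared near the top of the file, and rcutree_plugin.h is textually #included at the very end, where it can use any static helper defined above it. Below is a minimal, self-contained sketch of that pattern; the names plugin_announce(), plugin_pending(), and core_check() are hypothetical stand-ins rather than identifiers from this commit, and the part below the divider plays the role that rcutree_plugin.h plays in the real file.

/*
 * Hypothetical single-file illustration of the layout this commit adopts:
 * forward declarations of the plugin interface near the top, the "plugin"
 * implementation at the bottom (rcutree.c pulls the bottom part in with
 * #include "rcutree_plugin.h").
 */
#include <stdio.h>

/* Forward declarations for the plugin -- these document the plugin interface. */
static void plugin_announce(void);
static int plugin_pending(int cpu);

/* Core code: uses the plugin only through the declarations above. */
static void core_check(int cpu)
{
	if (plugin_pending(cpu))
		printf("CPU %d has plugin work pending\n", cpu);
}

int main(void)
{
	plugin_announce();
	core_check(0);
	return 0;
}

/* ---- below corresponds to what rcutree_plugin.h contributes ---- */

static void plugin_announce(void)
{
	printf("plugin: booting\n");
}

/* The plugin may also call static helpers defined in the core file above. */
static int plugin_pending(int cpu)
{
	return cpu == 0;	/* arbitrary placeholder condition */
}

The benefit noted in the commit message falls out directly: the block of forward declarations doubles as documentation of exactly what the plugin must provide.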
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  72
1 file changed, 33 insertions, 39 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f85b6842d1e1..53a5ef0ca911 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -81,24 +81,29 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-extern long rcu_batches_completed_sched(void);
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
-static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags);
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
+/* Forward declarations for rcutree_plugin.h */
+static inline void rcu_bootup_announce(void);
+long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
+static int rcu_preempted_readers(struct rcu_node *rnp);
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+static void rcu_print_task_stall(struct rcu_node *rnp);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
+static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp);
+static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void __rcu_process_callbacks(struct rcu_state *rsp,
-				    struct rcu_data *rdp);
-static void __call_rcu(struct rcu_head *head,
-		       void (*func)(struct rcu_head *rcu),
-		       struct rcu_state *rsp);
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
-					   int preemptable);
+static void rcu_preempt_check_callbacks(int cpu);
+static void rcu_preempt_process_callbacks(void);
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+static int rcu_preempt_pending(int cpu);
+static int rcu_preempt_needs_cpu(int cpu);
+static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
+static void __init __rcu_init_preempt(void);
 
-#include "rcutree_plugin.h"
 
 /*
  * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
@@ -377,7 +382,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state. Return 1 if this CPU
- * is already in a quiescent state courtesy of dynticks idle mode.
+ * is in dynticks idle mode, which is an extended quiescent state.
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
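The reworded comment above concerns the dynticks sampling done at grace-period start: a CPU's counter is snapshotted so that, on a later recheck, a CPU that was in (or passed through) dynticks-idle can be credited with a quiescent state without being disturbed. The following is only a toy model of that snapshot-and-compare idea, assuming a counter bumped on every idle transition with "even means idle"; the kernel's actual dynticks protocol differs in its details, and all names here are illustrative.

#include <stdio.h>

struct toy_cpu {
	int dynticks;		/* transition counter (toy model) */
	int dynticks_snap;	/* snapshot taken at grace-period start */
};

/* Take the snapshot; report 1 if the CPU is already idle (extended QS). */
static int toy_save_progress_counter(struct toy_cpu *cpu)
{
	cpu->dynticks_snap = cpu->dynticks;
	return (cpu->dynticks & 1) == 0;	/* even => idle in this model */
}

/*
 * Later check: the CPU is idle now, or its counter moved since the
 * snapshot (so it passed through idle); either way, credit an implicit
 * quiescent state.
 */
static int toy_implicit_qs(const struct toy_cpu *cpu)
{
	return ((cpu->dynticks & 1) == 0) || (cpu->dynticks != cpu->dynticks_snap);
}

int main(void)
{
	struct toy_cpu busy = { .dynticks = 1 };	/* odd: running */

	printf("idle at snapshot? %d\n", toy_save_progress_counter(&busy));
	busy.dynticks += 2;	/* went idle and came back: two transitions */
	printf("implicit QS later? %d\n", toy_implicit_qs(&busy));
	return 0;
}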
@@ -624,9 +629,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	note_new_gpnum(rsp, rdp);
 
 	/*
-	 * Because we are first, we know that all our callbacks will
-	 * be covered by this upcoming grace period, even the ones
-	 * that were registered arbitrarily recently.
+	 * Because this CPU just now started the new grace period, we know
+	 * that all of its callbacks will be covered by this upcoming grace
+	 * period, even the ones that were registered arbitrarily recently.
+	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+	 *
+	 * Other CPUs cannot be sure exactly when the grace period started.
+	 * Therefore, their recently registered callbacks must pass through
+	 * an additional RCU_NEXT_READY stage, so that they will be handled
+	 * by the next RCU grace period.
 	 */
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
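The new comment block spells out why the CPU that starts a grace period may advance everything it has queued to RCU_WAIT_TAIL, while other CPUs must hold recently queued callbacks back one stage. The sketch below models the segments as simple counters so the two advancement rules are easy to see; fake_cpu, start_gp_on_initiating_cpu(), and note_gp_on_other_cpu() are illustrative names only, and the real code manipulates tail pointers into rdp->nxtlist rather than counters.

#include <stdio.h>

/* Simplified stand-ins for the callback segments named in the comment above. */
enum seg { SEG_DONE, SEG_WAIT, SEG_NEXT_READY, SEG_NEXT, NSEGS };

struct fake_cpu {
	int cbs[NSEGS];		/* number of callbacks in each segment */
};

/*
 * CPU that starts the grace period: everything it has queued is covered,
 * so both NEXT and NEXT_READY callbacks can be promoted to WAIT.
 */
static void start_gp_on_initiating_cpu(struct fake_cpu *cpu)
{
	cpu->cbs[SEG_WAIT] += cpu->cbs[SEG_NEXT_READY] + cpu->cbs[SEG_NEXT];
	cpu->cbs[SEG_NEXT_READY] = 0;
	cpu->cbs[SEG_NEXT] = 0;
}

/*
 * Any other CPU: it cannot know exactly when the grace period began, so
 * newly queued callbacks advance only one stage, to NEXT_READY, and must
 * wait for the following grace period.
 */
static void note_gp_on_other_cpu(struct fake_cpu *cpu)
{
	cpu->cbs[SEG_NEXT_READY] += cpu->cbs[SEG_NEXT];
	cpu->cbs[SEG_NEXT] = 0;
}

int main(void)
{
	struct fake_cpu a = { .cbs = { 0, 0, 1, 3 } };
	struct fake_cpu b = { .cbs = { 0, 0, 1, 3 } };

	start_gp_on_initiating_cpu(&a);
	note_gp_on_other_cpu(&b);
	printf("initiator: wait=%d ready=%d\n", a.cbs[SEG_WAIT], a.cbs[SEG_NEXT_READY]);
	printf("other:     wait=%d ready=%d\n", b.cbs[SEG_WAIT], b.cbs[SEG_NEXT_READY]);
	return 0;
}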
@@ -886,7 +897,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
-	 * Note that the outgoing CPU is now quiscent, so it is now
+	 * Note that the outgoing CPU is now quiescent, so it is now
 	 * (uncharacteristically) safe to access its rcu_data structure.
 	 * Note also that we must carefully retain the order of the
 	 * outgoing CPU's callbacks in order for rcu_barrier() to work
@@ -1577,25 +1588,6 @@ do { \
 	} \
 } while (0)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-void __init __rcu_init_preempt(void)
-{
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
-}
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-void __init __rcu_init_preempt(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 void __init __rcu_init(void)
 {
 	int i;			/* All used by RCU_INIT_FLAVOR(). */
@@ -1612,6 +1604,8 @@ void __init __rcu_init(void)
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
+#include "rcutree_plugin.h"
+
 module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);