author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-11-22 11:53:50 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-11-22 12:58:16 -0500
commit		6ebb237bece23275d1da149b61a342f0d4d06a08 (patch)
tree		5f3d99f6ce2e1e98736e5708eecb5bf217f78fde
parent		9f680ab41485edfdc96331b70afa7513aa0a7720 (diff)
rcu: Re-arrange code to reduce #ifdef pain
Remove #ifdefs from kernel/rcupdate.c and include/linux/rcupdate.h by
moving code to include/linux/rcutiny.h, include/linux/rcutree.h, and
kernel/rcutree.c.

Also remove some definitions that are no longer used.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1258908830885-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/rcupdate.h	12
-rw-r--r--	include/linux/rcutiny.h		11
-rw-r--r--	include/linux/rcutree.h		4
-rw-r--r--	kernel/rcupdate.c		104
-rw-r--r--	kernel/rcutree.c		80
-rw-r--r--	kernel/rcutree_plugin.h		24
6 files changed, 118 insertions(+), 117 deletions(-)
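For readers skimming the diff, the core idea is that each RCU implementation's header supplies its own version of an interface, so the shared header and shared kernel/rcupdate.c no longer need a per-configuration #ifdef around every declaration. Below is a minimal sketch of that pattern, using hypothetical names (foo.h, foo_tiny.h, foo_tree.h, do_foo, do_foo_cheap, CONFIG_TREE_FOO) rather than the real RCU files:

/* foo.h -- shared header: the only #ifdef left just selects an include */
#ifdef CONFIG_TREE_FOO
#include <linux/foo_tree.h>
#else
#include <linux/foo_tiny.h>
#endif

/* foo_tree.h -- full implementation declares the real function */
extern void do_foo(void);

/* foo_tiny.h -- small implementation maps the name to something cheaper */
#define do_foo do_foo_cheap

Every caller simply uses do_foo(); which definition it gets is decided once, by whichever header was included, exactly as rcupdate.h below decides between rcutiny.h and rcutree.h.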
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2f1bc42a3b82..24440f4bf476 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-#ifdef CONFIG_TREE_PREEMPT_RCU
-extern void synchronize_rcu(void);
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#define synchronize_rcu synchronize_sched
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 extern void synchronize_rcu_bh(void);
 extern void synchronize_sched(void);
 extern void rcu_barrier(void);
@@ -67,13 +62,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-#ifndef CONFIG_TINY_RCU
-extern int rcu_needs_cpu(int cpu);
-#else
-static inline int rcu_needs_cpu(int cpu) { return 0; }
-#endif
-extern int rcu_scheduler_active;
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a3b6272af2dd..c4ba9a78721e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -39,6 +39,11 @@ void rcu_bh_qs(int cpu);
 #define rcu_init_sched()	do { } while (0)
 extern void rcu_check_callbacks(int cpu, int user);
 
+static inline int rcu_needs_cpu(int cpu)
+{
+	return 0;
+}
+
 /*
  * Return the number of grace periods.
  */
@@ -57,6 +62,8 @@ static inline long rcu_batches_completed_bh(void)
 
 extern int rcu_expedited_torture_stats(char *page);
 
+#define synchronize_rcu synchronize_sched
+
 static inline void synchronize_rcu_expedited(void)
 {
 	synchronize_sched();
@@ -86,6 +93,10 @@ static inline void rcu_exit_nohz(void)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
+static inline void rcu_scheduler_starting(void)
+{
+}
+
 static inline void exit_rcu(void)
 {
 }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 111a65257350..c93eee5911b0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,12 +35,14 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
+extern void rcu_scheduler_starting(void);
 extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 extern void __rcu_read_lock(void);
 extern void __rcu_read_unlock(void);
+extern void synchronize_rcu(void);
 extern void exit_rcu(void);
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -55,7 +57,7 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
-#define __synchronize_sched() synchronize_rcu()
+#define synchronize_rcu synchronize_sched
 
 static inline void exit_rcu(void)
 {
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index eb6b534db318..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-int rcu_scheduler_active __read_mostly;
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -66,104 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
-
-#ifndef CONFIG_TINY_RCU
-
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (!rcu_scheduler_active)
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed.  These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API.  In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-void synchronize_sched(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_sched(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed.  RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- */
-void synchronize_rcu_bh(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_bh(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e3d3bbddbcd5..4ca7e0292fd8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -79,6 +80,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+static int rcu_scheduler_active __read_mostly;
+
 
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
@@ -1396,6 +1399,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed.  These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1480,6 +1545,21 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
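The kernel-doc added to kernel/rcutree.c above spells out what synchronize_sched() guarantees: every preempt_disable() section already running when it is called will have finished by the time it returns. A minimal usage sketch of the pattern this enables, with hypothetical names (struct gadget, global_gadget) that are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gadget {				/* hypothetical object protected by rcu-sched */
	int value;
};

struct gadget *global_gadget;		/* hypothetical shared pointer */

int gadget_reader(void)
{
	struct gadget *g;
	int val = -1;

	preempt_disable();		/* serves as an rcu-sched read-side section */
	g = rcu_dereference(global_gadget);
	if (g)
		val = g->value;
	preempt_enable();
	return val;
}

void gadget_updater(struct gadget *newg)
{
	struct gadget *old = global_gadget;

	rcu_assign_pointer(global_gadget, newg);
	synchronize_sched();		/* wait out all pre-existing preempt_disable() sections */
	kfree(old);			/* no reader can still hold the old pointer */
}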
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0bdb592eee66..1d295c789d3d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -425,6 +425,30 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (!rcu_scheduler_active)
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
 /*
  * Wait for an rcu-preempt grace period.  We are supposed to expedite the
  * grace period, but this is the crude slow compatability hack, so just
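The synchronize_rcu() moved into kernel/rcutree_plugin.h above blocks the updater until a grace period elapses; the non-blocking alternative exported just before it, call_rcu(), hands the old object to the grace-period machinery instead. A hedged sketch of that callback style, again with hypothetical names (struct widget, global_widget, widget_free_rcu) rather than anything from this patch, while readers use rcu_read_lock()/rcu_read_unlock() as the kernel-doc describes:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {				/* hypothetical RCU-protected object */
	int value;
	struct rcu_head rcu;		/* embedded so call_rcu() can queue it */
};

struct widget *global_widget;		/* hypothetical shared pointer */

static void widget_free_rcu(struct rcu_head *head)
{
	/* Runs after a grace period; no reader can still see the object. */
	kfree(container_of(head, struct widget, rcu));
}

int widget_reader(void)
{
	struct widget *w;
	int val = -1;

	rcu_read_lock();		/* read-side critical section */
	w = rcu_dereference(global_widget);
	if (w)
		val = w->value;
	rcu_read_unlock();
	return val;
}

void widget_updater(struct widget *neww)
{
	struct widget *old = global_widget;

	rcu_assign_pointer(global_widget, neww);
	if (old)
		call_rcu(&old->rcu, widget_free_rcu);	/* deferred, non-blocking free */
}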