path: root/kernel/rcupdate.c
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--  kernel/rcupdate.c | 104 ----------------------------------------
1 file changed, 0 insertions(+), 104 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index eb6b534db318..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-int rcu_scheduler_active __read_mostly;
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -66,104 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
-
-#ifndef CONFIG_TINY_RCU
-
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (!rcu_scheduler_active)
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed. These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-void synchronize_sched(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_sched(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- */
-void synchronize_rcu_bh(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_bh(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
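
The removed synchronize_rcu() implements the canonical blocking update-side primitive. A minimal sketch of how callers typically use it follows; the struct foo type, global_foo pointer, and reader()/writer() functions are hypothetical illustrations, not part of this patch, and the writer is assumed to be serialized by an update-side lock.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object; not part of the patch above. */
struct foo {
	int a;
};
static struct foo *global_foo;

/* Reader: a critical section delimited by rcu_read_lock()/unlock(). */
static int reader(void)
{
	struct foo *p;
	int val = 0;

	rcu_read_lock();
	p = rcu_dereference(global_foo);
	if (p)
		val = p->a;
	rcu_read_unlock();
	return val;
}

/*
 * Writer: publish the new version, wait for a grace period, then free
 * the old one.  Assumes callers hold an update-side lock.
 */
static void writer(struct foo *newp)
{
	struct foo *oldp = global_foo;

	rcu_assign_pointer(global_foo, newp);
	synchronize_rcu();	/* all pre-existing readers are done */
	kfree(oldp);
}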
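
The wakeme_after_rcu() callback retained above is just a call_rcu() handler that recovers its enclosing structure with container_of() and signals a completion. The same embedded-rcu_head pattern gives a non-blocking alternative to synchronize_rcu() followed by kfree(), sketched here; struct bar and its helpers are hypothetical.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object embedding the rcu_head that call_rcu() links. */
struct bar {
	struct rcu_head rcu;
	int b;
};

static void bar_free_rcu(struct rcu_head *head)
{
	/* Recover the enclosing object, as wakeme_after_rcu() does. */
	struct bar *p = container_of(head, struct bar, rcu);

	kfree(p);
}

/* Defer the free past a grace period without blocking the caller. */
static void bar_retire(struct bar *p)
{
	call_rcu(&p->rcu, bar_free_rcu);
}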
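
The synchronize_sched() docbook comment notes that preempt_disable() and friends may stand in for rcu_read_lock_sched(). A hedged sketch of that usage, retiring a function pointer so that no CPU can still be executing through it; active_hook, caller(), and retire_hook() are illustrative names only.

#include <linux/rcupdate.h>
#include <linux/preempt.h>

/* Hypothetical hook pointer that the writer wants to retire. */
static int (*active_hook)(int);

static int caller(int x)
{
	int (*hook)(int);
	int ret = 0;

	preempt_disable();	/* stands in for rcu_read_lock_sched() */
	hook = rcu_dereference(active_hook);
	if (hook)
		ret = hook(x);
	preempt_enable();	/* ...and for rcu_read_unlock_sched() */
	return ret;
}

static void retire_hook(void)
{
	rcu_assign_pointer(active_hook, NULL);
	/*
	 * After this returns, every preempt-disabled region (including
	 * hardirq and NMI handlers) in flight on entry has completed,
	 * so no CPU can still be running the old hook.
	 */
	synchronize_sched();
}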