diff options
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r-- | include/linux/rcupdate.h | 143 |
1 file changed, 89 insertions, 54 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 15fbb3ca634d..3ebd0b7bcb08 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion | 2 | * Read-Copy Update mechanism for mutual exclusion |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -18,7 +18,7 @@ | |||
18 | * Copyright IBM Corporation, 2001 | 18 | * Copyright IBM Corporation, 2001 |
19 | * | 19 | * |
20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | 20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> |
21 | * | 21 | * |
22 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | 22 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> |
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
24 | * Papers: | 24 | * Papers: |
@@ -26,7 +26,7 @@ | |||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | 26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) |
27 | * | 27 | * |
28 | * For detailed explanation of Read-Copy Update mechanism see - | 28 | * For detailed explanation of Read-Copy Update mechanism see - |
29 | * http://lse.sourceforge.net/locking/rcupdate.html | 29 | * http://lse.sourceforge.net/locking/rcupdate.html |
30 | * | 30 | * |
31 | */ | 31 | */ |
32 | 32 | ||
@@ -51,25 +51,48 @@ struct rcu_head { | |||
51 | void (*func)(struct rcu_head *head); | 51 | void (*func)(struct rcu_head *head); |
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* Internal to kernel, but needed by rcupreempt.h. */ | 54 | /* Exported common interfaces */ |
55 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
56 | extern void synchronize_rcu(void); | ||
57 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
58 | #define synchronize_rcu synchronize_sched | ||
59 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
60 | extern void synchronize_rcu_bh(void); | ||
61 | extern void synchronize_sched(void); | ||
62 | extern void rcu_barrier(void); | ||
63 | extern void rcu_barrier_bh(void); | ||
64 | extern void rcu_barrier_sched(void); | ||
65 | extern void synchronize_sched_expedited(void); | ||
66 | extern int sched_expedited_torture_stats(char *page); | ||
67 | |||
68 | /* Internal to kernel */ | ||
69 | extern void rcu_init(void); | ||
70 | extern void rcu_scheduler_starting(void); | ||
71 | extern int rcu_needs_cpu(int cpu); | ||
55 | extern int rcu_scheduler_active; | 72 | extern int rcu_scheduler_active; |
56 | 73 | ||
57 | #if defined(CONFIG_CLASSIC_RCU) | 74 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
58 | #include <linux/rcuclassic.h> | ||
59 | #elif defined(CONFIG_TREE_RCU) | ||
60 | #include <linux/rcutree.h> | 75 | #include <linux/rcutree.h> |
61 | #elif defined(CONFIG_PREEMPT_RCU) | ||
62 | #include <linux/rcupreempt.h> | ||
63 | #else | 76 | #else |
64 | #error "Unknown RCU implementation specified to kernel configuration" | 77 | #error "Unknown RCU implementation specified to kernel configuration" |
65 | #endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ | 78 | #endif |
66 | 79 | ||
67 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | 80 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } |
68 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | 81 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT |
69 | #define INIT_RCU_HEAD(ptr) do { \ | 82 | #define INIT_RCU_HEAD(ptr) do { \ |
70 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 83 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
71 | } while (0) | 84 | } while (0) |
72 | 85 | ||
86 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
87 | extern struct lockdep_map rcu_lock_map; | ||
88 | # define rcu_read_acquire() \ | ||
89 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
90 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
91 | #else | ||
92 | # define rcu_read_acquire() do { } while (0) | ||
93 | # define rcu_read_release() do { } while (0) | ||
94 | #endif | ||
95 | |||
73 | /** | 96 | /** |
74 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 97 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
75 | * | 98 | * |
@@ -99,13 +122,12 @@ extern int rcu_scheduler_active; | |||
99 | * | 122 | * |
100 | * It is illegal to block while in an RCU read-side critical section. | 123 | * It is illegal to block while in an RCU read-side critical section. |
101 | */ | 124 | */ |
102 | #define rcu_read_lock() __rcu_read_lock() | 125 | static inline void rcu_read_lock(void) |
103 | 126 | { | |
104 | /** | 127 | __rcu_read_lock(); |
105 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 128 | __acquire(RCU); |
106 | * | 129 | rcu_read_acquire(); |
107 | * See rcu_read_lock() for more information. | 130 | } |
108 | */ | ||
109 | 131 | ||
110 | /* | 132 | /* |
111 | * So where is rcu_write_lock()? It does not exist, as there is no | 133 | * So where is rcu_write_lock()? It does not exist, as there is no |
@@ -116,7 +138,18 @@ extern int rcu_scheduler_active; | |||
116 | * used as well. RCU does not care how the writers keep out of each | 138 | * used as well. RCU does not care how the writers keep out of each |
117 | * others' way, as long as they do so. | 139 | * others' way, as long as they do so. |
118 | */ | 140 | */ |
119 | #define rcu_read_unlock() __rcu_read_unlock() | 141 | |
142 | /** | ||
143 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | ||
144 | * | ||
145 | * See rcu_read_lock() for more information. | ||
146 | */ | ||
147 | static inline void rcu_read_unlock(void) | ||
148 | { | ||
149 | rcu_read_release(); | ||
150 | __release(RCU); | ||
151 | __rcu_read_unlock(); | ||
152 | } | ||
120 | 153 | ||
121 | /** | 154 | /** |
122 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 155 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section |
@@ -129,14 +162,24 @@ extern int rcu_scheduler_active; | |||
129 | * can use just rcu_read_lock(). | 162 | * can use just rcu_read_lock(). |
130 | * | 163 | * |
131 | */ | 164 | */ |
132 | #define rcu_read_lock_bh() __rcu_read_lock_bh() | 165 | static inline void rcu_read_lock_bh(void) |
166 | { | ||
167 | __rcu_read_lock_bh(); | ||
168 | __acquire(RCU_BH); | ||
169 | rcu_read_acquire(); | ||
170 | } | ||
133 | 171 | ||
134 | /* | 172 | /* |
135 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section | 173 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
136 | * | 174 | * |
137 | * See rcu_read_lock_bh() for more information. | 175 | * See rcu_read_lock_bh() for more information. |
138 | */ | 176 | */ |
139 | #define rcu_read_unlock_bh() __rcu_read_unlock_bh() | 177 | static inline void rcu_read_unlock_bh(void) |
178 | { | ||
179 | rcu_read_release(); | ||
180 | __release(RCU_BH); | ||
181 | __rcu_read_unlock_bh(); | ||
182 | } | ||
140 | 183 | ||
141 | /** | 184 | /** |
142 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section | 185 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section |
@@ -147,17 +190,38 @@ extern int rcu_scheduler_active; | |||
147 | * - call_rcu_sched() and rcu_barrier_sched() | 190 | * - call_rcu_sched() and rcu_barrier_sched() |
148 | * on the write-side to insure proper synchronization. | 191 | * on the write-side to insure proper synchronization. |
149 | */ | 192 | */ |
150 | #define rcu_read_lock_sched() preempt_disable() | 193 | static inline void rcu_read_lock_sched(void) |
151 | #define rcu_read_lock_sched_notrace() preempt_disable_notrace() | 194 | { |
195 | preempt_disable(); | ||
196 | __acquire(RCU_SCHED); | ||
197 | rcu_read_acquire(); | ||
198 | } | ||
199 | |||
200 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ | ||
201 | static inline notrace void rcu_read_lock_sched_notrace(void) | ||
202 | { | ||
203 | preempt_disable_notrace(); | ||
204 | __acquire(RCU_SCHED); | ||
205 | } | ||
152 | 206 | ||
153 | /* | 207 | /* |
154 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section | 208 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section |
155 | * | 209 | * |
156 | * See rcu_read_lock_sched for more information. | 210 | * See rcu_read_lock_sched for more information. |
157 | */ | 211 | */ |
158 | #define rcu_read_unlock_sched() preempt_enable() | 212 | static inline void rcu_read_unlock_sched(void) |
159 | #define rcu_read_unlock_sched_notrace() preempt_enable_notrace() | 213 | { |
214 | rcu_read_release(); | ||
215 | __release(RCU_SCHED); | ||
216 | preempt_enable(); | ||
217 | } | ||
160 | 218 | ||
219 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ | ||
220 | static inline notrace void rcu_read_unlock_sched_notrace(void) | ||
221 | { | ||
222 | __release(RCU_SCHED); | ||
223 | preempt_enable_notrace(); | ||
224 | } | ||
161 | 225 | ||
162 | 226 | ||
163 | /** | 227 | /** |
@@ -207,24 +271,6 @@ struct rcu_synchronize { | |||
207 | extern void wakeme_after_rcu(struct rcu_head *head); | 271 | extern void wakeme_after_rcu(struct rcu_head *head); |
208 | 272 | ||
209 | /** | 273 | /** |
210 | * synchronize_sched - block until all CPUs have exited any non-preemptive | ||
211 | * kernel code sequences. | ||
212 | * | ||
213 | * This means that all preempt_disable code sequences, including NMI and | ||
214 | * hardware-interrupt handlers, in progress on entry will have completed | ||
215 | * before this primitive returns. However, this does not guarantee that | ||
216 | * softirq handlers will have completed, since in some kernels, these | ||
217 | * handlers can run in process context, and can block. | ||
218 | * | ||
219 | * This primitive provides the guarantees made by the (now removed) | ||
220 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
221 | * guarantees that rcu_read_lock() sections will have completed. | ||
222 | * In "classic RCU", these two guarantees happen to be one and | ||
223 | * the same, but can differ in realtime RCU implementations. | ||
224 | */ | ||
225 | #define synchronize_sched() __synchronize_sched() | ||
226 | |||
227 | /** | ||
228 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 274 | * call_rcu - Queue an RCU callback for invocation after a grace period. |
229 | * @head: structure to be used for queueing the RCU updates. | 275 | * @head: structure to be used for queueing the RCU updates. |
230 | * @func: actual update function to be invoked after the grace period | 276 | * @func: actual update function to be invoked after the grace period |
@@ -259,15 +305,4 @@ extern void call_rcu(struct rcu_head *head, | |||
259 | extern void call_rcu_bh(struct rcu_head *head, | 305 | extern void call_rcu_bh(struct rcu_head *head, |
260 | void (*func)(struct rcu_head *head)); | 306 | void (*func)(struct rcu_head *head)); |
261 | 307 | ||
262 | /* Exported common interfaces */ | ||
263 | extern void synchronize_rcu(void); | ||
264 | extern void rcu_barrier(void); | ||
265 | extern void rcu_barrier_bh(void); | ||
266 | extern void rcu_barrier_sched(void); | ||
267 | |||
268 | /* Internal to kernel */ | ||
269 | extern void rcu_init(void); | ||
270 | extern void rcu_scheduler_starting(void); | ||
271 | extern int rcu_needs_cpu(int cpu); | ||
272 | |||
273 | #endif /* __LINUX_RCUPDATE_H */ | 308 | #endif /* __LINUX_RCUPDATE_H */ |