Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/rcuclassic.h   161
-rw-r--r--   include/linux/rcupdate.h     168
2 files changed, 210 insertions, 119 deletions
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
new file mode 100644
index 000000000000..2b8b045a51d5
--- /dev/null
+++ b/include/linux/rcuclassic.h
@@ -0,0 +1,161 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (classic version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+
+#ifndef __LINUX_RCUCLASSIC_H
+#define __LINUX_RCUCLASSIC_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+
+/* Global control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+	long	cur;		/* Current batch number. */
+	long	completed;	/* Number of the last completed batch */
+	int	next_pending;	/* Is the next batch already waiting? */
+
+	int	signaled;
+
+	spinlock_t	lock	____cacheline_internodealigned_in_smp;
+	cpumask_t	cpumask; /* CPUs that need to switch in order */
+				 /* for current batch to proceed. */
+} ____cacheline_internodealigned_in_smp;
+
+/* Is batch a before batch b ? */
+static inline int rcu_batch_before(long a, long b)
+{
+	return (a - b) < 0;
+}
+
+/* Is batch a after batch b ? */
+static inline int rcu_batch_after(long a, long b)
+{
+	return (a - b) > 0;
+}
+
+/*
+ * Per-CPU data for Read-Copy Update.
+ * nxtlist - new callbacks are added here
+ * curlist - current batch for which quiescent cycle started if any
+ */
+struct rcu_data {
+	/* 1) quiescent state handling : */
+	long		quiescbatch;	/* Batch # for grace period */
+	int		passed_quiesc;	/* User-mode/idle loop etc. */
+	int		qs_pending;	/* core waits for quiesc state */
+
+	/* 2) batch handling */
+	long		batch;		/* Batch # for current RCU batch */
+	struct rcu_head *nxtlist;
+	struct rcu_head **nxttail;
+	long		qlen;		/* # of queued callbacks */
+	struct rcu_head *curlist;
+	struct rcu_head **curtail;
+	struct rcu_head *donelist;
+	struct rcu_head **donetail;
+	long		blimit;		/* Upper limit on a processed batch */
+	int		cpu;
+	struct rcu_head barrier;
+};
+
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	rdp->passed_quiesc = 1;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	rdp->passed_quiesc = 1;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
+#define __rcu_read_lock() \
+	do { \
+		preempt_disable(); \
+		__acquire(RCU); \
+		rcu_read_acquire(); \
+	} while (0)
+#define __rcu_read_unlock() \
+	do { \
+		rcu_read_release(); \
+		__release(RCU); \
+		preempt_enable(); \
+	} while (0)
+#define __rcu_read_lock_bh() \
+	do { \
+		local_bh_disable(); \
+		__acquire(RCU_BH); \
+		rcu_read_acquire(); \
+	} while (0)
+#define __rcu_read_unlock_bh() \
+	do { \
+		rcu_read_release(); \
+		__release(RCU_BH); \
+		local_bh_enable(); \
+	} while (0)
+
+#define __synchronize_sched() synchronize_rcu()
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUCLASSIC_H */
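
[Editor's note] The "(a - b) < 0" trick in rcu_batch_before()/rcu_batch_after() above deserves a word: comparing the sign of the difference, rather than the raw values, keeps the ordering test correct even after the batch counter wraps around. The following minimal user-space sketch (not part of the patch) demonstrates the idea; it performs the subtraction in unsigned arithmetic to avoid signed-overflow undefined behavior, which is equivalent to the kernel's signed test on the two's-complement machines Linux targets.

/*
 * Sketch: wraparound-safe batch comparison (user-space demo, not
 * kernel code).  Build with: cc -o batchdemo batchdemo.c
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

/*
 * Same idea as rcu_batch_before(): a negative difference means
 * "a is older than b".  Unsigned subtraction wraps modulo 2^N by
 * definition, so there is no undefined behavior here; reinterpreting
 * the result as signed matches the kernel's (a - b) < 0 test on
 * two's-complement hardware.
 */
static int batch_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	/* Ordinary case: batch 5 precedes batch 7. */
	assert(batch_before(5, 7));
	assert(!batch_before(7, 5));

	/*
	 * Wraparound case: the counter has just wrapped from ULONG_MAX
	 * to 0.  A naive "a < b" would claim 0 precedes ULONG_MAX; the
	 * difference-based test correctly reports ULONG_MAX as older.
	 */
	assert(batch_before(ULONG_MAX, 0));
	assert(!batch_before(0, ULONG_MAX));

	printf("batch ordering survives counter wraparound\n");
	return 0;
}
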
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cc24a01df940..12aa13e13150 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -15,7 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
  *
@@ -53,96 +53,14 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+#include <linux/rcuclassic.h>
+
 #define RCU_HEAD_INIT { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
        (ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
-
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long	cur;		/* Current batch number. */
-	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
-
-	int	signaled;
-
-	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	cpumask_t	cpumask; /* CPUs that need to switch in order */
-				 /* for current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
-	return (a - b) > 0;
-}
-
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long		quiescbatch;	/* Batch # for grace period */
-	int		passed_quiesc;	/* User-mode/idle loop etc. */
-	int		qs_pending;	/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	long		batch;		/* Batch # for current RCU batch */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
-	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long		blimit;		/* Upper limit on a processed batch */
-	int		cpu;
-	struct rcu_head barrier;
-};
-
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-static inline void rcu_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
-
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -172,24 +90,13 @@ extern struct lockdep_map rcu_lock_map;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock() \
-	do { \
-		preempt_disable(); \
-		__acquire(RCU); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock() __rcu_read_lock()
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU); \
-		preempt_enable(); \
-	} while(0)
 
 /*
  * So where is rcu_write_lock()? It does not exist, as there is no
@@ -200,6 +107,7 @@ extern struct lockdep_map rcu_lock_map;
  * used as well. RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
+#define rcu_read_unlock() __rcu_read_unlock()
 
 /**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -212,24 +120,14 @@ extern struct lockdep_map rcu_lock_map;
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh() \
-	do { \
-		local_bh_disable(); \
-		__acquire(RCU_BH); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock_bh() __rcu_read_lock_bh()
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  *
  * See rcu_read_lock_bh() for more information.
  */
-#define rcu_read_unlock_bh() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU_BH); \
-		local_bh_enable(); \
-	} while(0)
+#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
 /*
  * Prevent the compiler from merging or refetching accesses. The compiler
@@ -293,21 +191,53 @@ extern struct lockdep_map rcu_lock_map;
  * In "classic RCU", these two guarantees happen to be one and
  * the same, but can differ in realtime RCU implementations.
  */
-#define synchronize_sched() synchronize_rcu()
+#define synchronize_sched() __synchronize_sched()
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+extern void call_rcu(struct rcu_head *head,
+		     void (*func)(struct rcu_head *head));
+
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by:
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ * OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ * These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));
+
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void rcu_barrier(void);
 
+/* Internal to kernel */
 extern void rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
+
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 
-/* Exported interfaces */
-extern void FASTCALL(call_rcu(struct rcu_head *head,
-		void (*func)(struct rcu_head *head)));
-extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
-		void (*func)(struct rcu_head *head)));
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
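
[Editor's note] For readers new to these interfaces, the following hypothetical sketch shows how rcu_read_lock()/rcu_read_unlock() and call_rcu() fit together. It is not part of the patch; it follows the canonical pattern in Documentation/RCU/whatisRCU.txt, and struct foo, gbl_foo, and foo_lock are illustrative names. Readers traverse the shared pointer without blocking, while an updater publishes a new version and defers freeing the old one until a grace period has elapsed.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int a;
	struct rcu_head rcu;
};

static struct foo *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

/* Reader: may run concurrently with an update and never blocks. */
static int foo_get_a(void)
{
	int ret;

	rcu_read_lock();
	ret = rcu_dereference(gbl_foo)->a;
	rcu_read_unlock();
	return ret;
}

/* Invoked by RCU once every pre-existing reader has finished. */
static void foo_reclaim(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

/* Updater: publish a new version, defer freeing the old one. */
static void foo_update_a(int new_a)
{
	struct foo *new_fp, *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return;		/* error handling elided for brevity */
	new_fp->a = new_a;

	spin_lock(&foo_lock);	/* serializes updaters only; readers never take it */
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&foo_lock);

	/* Free the old version only after a grace period has elapsed. */
	if (old_fp)
		call_rcu(&old_fp->rcu, foo_reclaim);
}
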