Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--	include/linux/rcupdate.h	173
1 file changed, 53 insertions(+), 120 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cc24a01df940..d32c14de270e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -15,7 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
  *
@@ -53,96 +53,18 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+#ifdef CONFIG_CLASSIC_RCU
+#include <linux/rcuclassic.h>
+#else /* #ifdef CONFIG_CLASSIC_RCU */
+#include <linux/rcupreempt.h>
+#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+
 #define RCU_HEAD_INIT { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
-
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long	cur;		/* Current batch number. */
-	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
-
-	int	signaled;
-
-	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	cpumask_t	cpumask; /* CPUs that need to switch in order */
-				 /* for current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
-	return (a - b) > 0;
-}
-
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long		quiescbatch;	/* Batch # for grace period */
-	int		passed_quiesc;	/* User-mode/idle loop etc. */
-	int		qs_pending;	/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	long		batch;		/* Batch # for current RCU batch */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
-	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long		blimit;		/* Upper limit on a processed batch */
-	int		cpu;
-	struct rcu_head barrier;
-};
-
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-static inline void rcu_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
-}
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
-
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
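
With this hunk, rcupdate.h stops carrying the classic implementation and only dispatches on CONFIG_CLASSIC_RCU; the double-underscore primitives that the wrappers below expand to must come from whichever header is included. As a hedged sketch only (the real definitions live in <linux/rcuclassic.h>, not in this patch), the classic header can re-wrap exactly the code removed above:

/* Illustrative sketch of the primitives an implementation header supplies. */
#define __rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
		rcu_read_acquire(); \
	} while (0)
#define __rcu_read_unlock() \
	do { \
		rcu_read_release(); \
		__release(RCU); \
		preempt_enable(); \
	} while (0)

The preemptible implementation in <linux/rcupreempt.h> supplies the same names with different bodies, which is the point of the split.
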
@@ -172,24 +94,13 @@ extern struct lockdep_map rcu_lock_map;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock() \
-	do { \
-		preempt_disable(); \
-		__acquire(RCU); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock() __rcu_read_lock()
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU); \
-		preempt_enable(); \
-	} while(0)
 
 /*
  * So where is rcu_write_lock()? It does not exist, as there is no
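
For orientation, a minimal reader-side sketch of how the wrapped primitives above are used; struct foo, gbl_foo, and read_foo_a() are illustrative names, not part of this patch, and rcu_dereference() is the pointer-fetch helper defined later in this header:

/* Hedged usage sketch -- illustrative names only. */
struct foo {
	int a;
};
struct foo *gbl_foo;

int read_foo_a(void)
{
	struct foo *fp;
	int ret;

	rcu_read_lock();			/* begin read-side critical section */
	fp = rcu_dereference(gbl_foo);		/* safely fetch RCU-protected pointer */
	ret = fp ? fp->a : -1;			/* must not block in here */
	rcu_read_unlock();			/* end read-side critical section */
	return ret;
}

Whichever implementation backs __rcu_read_lock(), the reader's obligations are unchanged: no blocking between lock and unlock.
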
@@ -200,6 +111,7 @@ extern struct lockdep_map rcu_lock_map;
  * used as well. RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
+#define rcu_read_unlock() __rcu_read_unlock()
 
 /**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -212,24 +124,14 @@ extern struct lockdep_map rcu_lock_map;
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh() \
-	do { \
-		local_bh_disable(); \
-		__acquire(RCU_BH); \
-		rcu_read_acquire(); \
-	} while(0)
+#define rcu_read_lock_bh() __rcu_read_lock_bh()
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  *
  * See rcu_read_lock_bh() for more information.
  */
-#define rcu_read_unlock_bh() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU_BH); \
-		local_bh_enable(); \
-	} while(0)
+#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
 /*
  * Prevent the compiler from merging or refetching accesses. The compiler
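
Similarly, a hedged sketch of a _bh-flavored reader, suited to data whose updates are queued with call_rcu_bh() (declared in the next hunk); bh_item, bh_head, and bh_sum() are illustrative names only:

/* Hedged _bh reader sketch -- illustrative names only. */
struct bh_item {
	int val;
	struct bh_item *next;
};
struct bh_item *bh_head;

int bh_sum(void)
{
	struct bh_item *p;
	int sum = 0;

	rcu_read_lock_bh();			/* also disables softirqs locally */
	for (p = rcu_dereference(bh_head); p; p = rcu_dereference(p->next))
		sum += p->val;
	rcu_read_unlock_bh();
	return sum;
}
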
@@ -293,21 +195,52 @@ extern struct lockdep_map rcu_lock_map;
  * In "classic RCU", these two guarantees happen to be one and
  * the same, but can differ in realtime RCU implementations.
  */
-#define synchronize_sched() synchronize_rcu()
+#define synchronize_sched() __synchronize_sched()
 
-extern void rcu_init(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+extern void call_rcu(struct rcu_head *head,
+		     void (*func)(struct rcu_head *head));
 
-/* Exported interfaces */
-extern void FASTCALL(call_rcu(struct rcu_head *head,
-		void (*func)(struct rcu_head *head)));
-extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
-		void (*func)(struct rcu_head *head)));
+/**
+ * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ * OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ * These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));
+
+/* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern int rcu_needs_cpu(int cpu);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
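
The call_rcu() kerneldoc added above pairs with an update-side pattern built on rcu_assign_pointer(). A hedged sketch follows; struct foo, gbl_foo, foo_lock, and the helper names are illustrative, not part of this patch, and the usual <linux/slab.h> and <linux/spinlock.h> includes are assumed:

/* Hedged update-side sketch -- illustrative names only. */
struct foo {
	int a;
	struct rcu_head rcu;
};
struct foo *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

static void foo_reclaim(struct rcu_head *rp)
{
	/* Runs only after all pre-existing readers have finished. */
	kfree(container_of(rp, struct foo, rcu));
}

void foo_update_a(int new_a)
{
	struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	struct foo *old_fp;

	if (!new_fp)
		return;
	new_fp->a = new_a;
	spin_lock(&foo_lock);			/* writers exclude each other... */
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);	/* ...RCU only orders vs. readers */
	spin_unlock(&foo_lock);
	if (old_fp)
		call_rcu(&old_fp->rcu, foo_reclaim);	/* free after a grace period */
}

This is also why there is no rcu_write_lock(): writer mutual exclusion is the caller's business (here a spinlock), while call_rcu() defers the free until every reader that might still hold a reference has left its critical section.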