Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--   include/linux/rcupdate.h   29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929..275aa3f1062 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked. RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
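
To make these guarantees concrete, here is a minimal sketch of the classic
call_rcu() deferred-free pattern that the comment above describes (struct foo,
gp, and the foo_* functions are hypothetical names used for illustration, not
part of this patch):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;		/* storage used by call_rcu() */
};

static struct foo __rcu *gp;		/* RCU-protected pointer */

static int foo_read(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();		/* begin read-side critical section */
	p = rcu_dereference(gp);
	if (p)
		val = p->data;
	rcu_read_unlock();		/* end read-side critical section */
	return val;
}

/* Runs only after a grace period: every reader that might have seen the
 * old pointer has exited its critical section, and the memory barriers
 * described above make those readers' accesses visible here. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_remove(struct foo *old)
{
	RCU_INIT_POINTER(gp, NULL);	/* unpublish: new readers see NULL */
	call_rcu(&old->rcu, foo_reclaim);	/* deferred kfree() */
}
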
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
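
Similarly, the sched flavor pairs call_rcu_sched() with readers that run with
preemption disabled; a brief sketch under the same caveat (struct bar and the
bar_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	int data;
	struct rcu_head rcu;
};

static struct bar __rcu *barp;

static void bar_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct bar, rcu));
}

static int bar_read(void)
{
	struct bar *p;
	int val = -1;

	rcu_read_lock_sched();		/* same effect as preempt_disable() */
	p = rcu_dereference_sched(barp);
	if (p)
		val = p->data;
	rcu_read_unlock_sched();	/* same effect as preempt_enable() */
	return val;
}

static void bar_remove(struct bar *old)
{
	RCU_INIT_POINTER(barp, NULL);
	call_rcu_sched(&old->rcu, bar_reclaim);	/* frees once all
						 * preemption-disabled
						 * regions have ended */
}
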
@@ -197,13 +222,13 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
-extern void rcu_user_hooks_switch(struct task_struct *prev,
-				  struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_enter_after_irq(void) { }
 static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+					 struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
 extern void exit_rcu(void);
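
The final hunk drops the extern declaration of rcu_user_hooks_switch() from
the CONFIG_RCU_USER_QS branch and adds an empty static inline stub to the
#else branch, so call sites can invoke the hook unconditionally and the call
compiles away when the option is off. A generic sketch of this stub idiom
(CONFIG_FEATURE and feature_hook() are hypothetical names):

#include <linux/sched.h>

#ifdef CONFIG_FEATURE
extern void feature_hook(struct task_struct *prev,
			 struct task_struct *next);
#else
/* Empty stub: callers need no #ifdef, and the compiler removes the call. */
static inline void feature_hook(struct task_struct *prev,
				struct task_struct *next) { }
#endif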