Diffstat (limited to 'include/linux/rcupdate.h')
 -rw-r--r--  include/linux/rcupdate.h | 126
 1 file changed, 116 insertions(+), 10 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 24440f4bf476..e3d37efe2703 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -78,14 +78,120 @@ extern void rcu_init(void);
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire() \
 		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
+
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-bh read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_bh_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_bh_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
+ * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-sched read-side critical section unless it
+ * can prove otherwise. Note that disabling of preemption (including
+ * disabling irqs) counts as an RCU-sched read-side critical section.
+ */
+static inline int rcu_read_lock_sched_held(void)
+{
+	int lockdep_opinion = 0;
+
+	if (debug_locks)
+		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+	return lockdep_opinion || preempt_count() != 0;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+# define rcu_read_acquire_bh() do { } while (0)
+# define rcu_read_release_bh() do { } while (0)
+# define rcu_read_acquire_sched() do { } while (0)
+# define rcu_read_release_sched() do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_sched_held(void)
+{
+	return preempt_count() != 0;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ *
+ * Do an rcu_dereference(), but check that the context is correct.
+ * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
+ * ensure that the rcu_dereference_check() executes within an RCU
+ * read-side critical section. It is also possible to check for
+ * locks being held, for example, by using lockdep_is_held().
+ */
+#define rcu_dereference_check(p, c) \
+	({ \
+		if (debug_locks) \
+			WARN_ON_ONCE(!(c)); \
+		rcu_dereference(p); \
+	})
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c) rcu_dereference(p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
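For readers unfamiliar with the new interface, here is a minimal usage sketch of rcu_dereference_check() as added above. It is not part of the patch; struct foo, my_lock, gp, and foo_get_a() are hypothetical names. The condition argument lists every context in which the dereference is legal, here either an RCU read-side critical section or holding the update-side lock, following the kernel-doc comment's mention of rcu_read_lock_held() and lockdep_is_held().

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical example: gp is read under rcu_read_lock() and updated
 * under my_lock, so either context makes the dereference legal. */
struct foo {
	int a;
};

static DEFINE_SPINLOCK(my_lock);	/* update-side lock (updater not shown) */
static struct foo *gp;

static int foo_get_a(void)
{
	struct foo *p;
	int a;

	rcu_read_lock();
	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
				      lockdep_is_held(&my_lock));
	a = p ? p->a : -1;	/* -1 if gp is currently NULL */
	rcu_read_unlock();
	return a;
}

With CONFIG_PROVE_RCU enabled, the dereference warns (WARN_ON_ONCE) only when it is reached outside both contexts; without it, the check compiles away to a plain rcu_dereference().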
@@ -160,7 +266,7 @@ static inline void rcu_read_lock_bh(void)
 {
 	__rcu_read_lock_bh();
 	__acquire(RCU_BH);
-	rcu_read_acquire();
+	rcu_read_acquire_bh();
 }
 
 /*
@@ -170,7 +276,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_read_release();
+	rcu_read_release_bh();
 	__release(RCU_BH);
 	__rcu_read_unlock_bh();
 }
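The two hunks above switch rcu_read_lock_bh()/rcu_read_unlock_bh() over to the new rcu_bh_lock_map, so an RCU-bh reader can now be checked against the flavor it actually uses. A hypothetical sketch, not from the patch (it reuses struct foo from the earlier example; gp_bh and foo_read_a_bh() are made-up names):

static struct foo *gp_bh;	/* hypothetical pointer published to RCU-bh readers */

static int foo_read_a_bh(void)
{
	int a = -1;
	struct foo *p;

	rcu_read_lock_bh();	/* with lockdep, acquires rcu_bh_lock_map */
	p = rcu_dereference_check(gp_bh, rcu_read_lock_bh_held());
	if (p)
		a = p->a;
	rcu_read_unlock_bh();	/* with lockdep, releases rcu_bh_lock_map */
	return a;
}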
@@ -188,7 +294,7 @@ static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
 	__acquire(RCU_SCHED);
-	rcu_read_acquire();
+	rcu_read_acquire_sched();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +311,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_read_release();
+	rcu_read_release_sched();
 	__release(RCU_SCHED);
 	preempt_enable();
 }
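Likewise, the last two hunks move rcu_read_lock_sched()/rcu_read_unlock_sched() onto rcu_sched_lock_map; the new kernel-doc also notes that any preemption-disabled (or irq-disabled) region counts as an RCU-sched read-side critical section. A hypothetical sketch of a checked RCU-sched reader, again not part of the patch (gp_sched and foo_read_a_sched() are made-up names):

static struct foo *gp_sched;	/* hypothetical pointer for RCU-sched readers */

static int foo_read_a_sched(void)
{
	int a = -1;
	struct foo *p;

	rcu_read_lock_sched();	/* preempt_disable() plus rcu_sched_lock_map acquire */
	p = rcu_dereference_check(gp_sched, rcu_read_lock_sched_held());
	if (p)
		a = p->a;
	rcu_read_unlock_sched();
	return a;
}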