 include/linux/rcupdate.h | 19 +++++++++++++++++++
 include/linux/srcu.h     | 11 +++++++----
 kernel/rcupdate.c        |  5 +++++
 kernel/rcutree.c         | 29 +++++++++++++++++++++++++++++
 kernel/rcutree_plugin.h  |  1 +
 5 files changed, 61 insertions(+), 4 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f409529ff35a..146d37d31778 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+	return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -270,6 +279,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +289,8 @@ static inline int rcu_read_lock_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
 
@@ -313,6 +327,9 @@ extern int rcu_read_lock_bh_held(void);
  * notice an extended quiescent state to other CPUs that started a grace
  * period. Otherwise we would delay any grace period as long as we run in
  * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +340,8 @@ static inline int rcu_read_lock_sched_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
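
Both rcu_read_lock_held() and rcu_read_lock_sched_held() feed the lockdep-RCU assertions behind rcu_dereference() and friends, so with this change a splat now fires when RCU is used on a CPU that RCU believes is idle or offline, even when a matching rcu_read_lock() is present. A minimal sketch of such a consumer (gbl_foo, foo_mutex, and get_foo() are invented for illustration, not part of this patch):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct foo { int a; };

static struct foo __rcu *gbl_foo;	/* hypothetical RCU-protected pointer */
static DEFINE_MUTEX(foo_mutex);		/* hypothetical update-side lock */

/* Readers hold rcu_read_lock(); updaters hold foo_mutex. */
static struct foo *get_foo(void)
{
	return rcu_dereference_check(gbl_foo, lockdep_is_held(&foo_mutex));
}

The rcu_read_lock_held() test is ORed in by rcu_dereference_check() itself, so this sketch now also covers the idle and offline cases with no change to the caller.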
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..9a323728e60c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
  * power mode. This way we can notice an extended quiescent state to
  * other CPUs that started a grace period. Otherwise we would delay any
  * grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-	if (rcu_is_cpu_idle())
-		return 0;
-
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-
+	if (rcu_is_cpu_idle())
+		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&sp->dep_map);
 }
 
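
Note the reordering: srcu_read_lock_held() now consults debug_lockdep_rcu_enabled() first, so when checking is disabled it returns 1 (no complaint) before looking at idle or offline state, matching rcu_read_lock_held() above. This predicate backs srcu_dereference(), so an SRCU reader running on an offline CPU now splats even though it holds a valid srcu_read_lock(). A minimal sketch (my_srcu and my_data are invented for illustration):

#include <linux/srcu.h>

struct foo { int a; };

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at boot */
static struct foo __rcu *my_data;	/* hypothetical SRCU-protected pointer */

static int read_foo(void)
{
	struct foo *p;
	int idx, a = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_data, &my_srcu); /* checks srcu_read_lock_held() */
	if (p)
		a = p->a;
	srcu_read_unlock(&my_srcu, idx);
	return a;
}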
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2bc4e135ff23..a86f1741cc27 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -88,6 +88,9 @@ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
  * section.
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 int rcu_read_lock_bh_held(void)
 {
@@ -95,6 +98,8 @@ int rcu_read_lock_bh_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
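
rcu_read_lock_bh_held() similarly backs rcu_dereference_bh(), so RCU-bh readers on offline CPUs are now caught as well. A minimal sketch (bh_foo and its struct are invented for illustration):

#include <linux/rcupdate.h>

struct foo { int a; };

static struct foo __rcu *bh_foo;	/* hypothetical pointer published by updaters */

static int bh_reader(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(bh_foo);	/* checks rcu_read_lock_bh_held() */
	if (p)
		a = p->a;
	rcu_read_unlock_bh();
	return a;
}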
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index dccd2f78db4e..bcf7db2f2fd2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -591,6 +591,35 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Is the current CPU online? Disable preemption to avoid false positives
+ * that could otherwise happen due to the current CPU number being sampled,
+ * this task being preempted, its old CPU being taken offline, resuming
+ * on some other CPU, then determining that its old CPU is now offline.
+ * It is OK to use RCU on an offline processor during initial boot, hence
+ * the check for rcu_scheduler_fully_active.
+ *
+ * Disable checking if in an NMI handler because we cannot safely report
+ * errors from NMI handlers anyway.
+ */
+bool rcu_lockdep_current_cpu_online(void)
+{
+	bool ret;
+
+	if (in_nmi())
+		return 1;
+	preempt_disable();
+	ret = cpu_online(smp_processor_id()) ||
+	      !rcu_scheduler_fully_active;
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
 /**
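
To make the comment's race concrete, here is the pattern the preempt_disable()/preempt_enable() pair rules out: a hypothetical, deliberately buggy variant, not kernel code (raw_smp_processor_id() is used so the sketch compiles without the separate CONFIG_DEBUG_PREEMPT warning that plain smp_processor_id() would raise in preemptible context):

#include <linux/cpumask.h>
#include <linux/smp.h>

static bool buggy_cpu_is_online(void)	/* hypothetical counterexample */
{
	int cpu = raw_smp_processor_id();	/* sample the CPU number... */

	/*
	 * ...the task can be preempted right here, its old CPU taken
	 * offline, and the task resumed on another CPU...
	 */
	return cpu_online(cpu);		/* ...so this may test a stale CPU */
}

rcu_lockdep_current_cpu_online() instead pins the task across the sample-and-test, so the answer cannot go stale between smp_processor_id() and cpu_online().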
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index aa93b074bb2f..cecea84f4f3f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1946,6 +1946,7 @@ void synchronize_sched_expedited(void)
 	/* Note that atomic_inc_return() implies full memory barrier. */
 	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
 	get_online_cpus();
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 
 	/*
 	 * Each pass through the following loop attempts to force a
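
The new WARN_ON_ONCE() asserts the invariant behind the get_online_cpus()/put_online_cpus() bracket: hotplug is held off, so the task must be running on an online CPU, and a failure here points at broken hotplug or RCU bookkeeping rather than at the caller. A minimal sketch of that bracketing pattern (count_online_cpus() is invented for illustration):

#include <linux/cpu.h>
#include <linux/cpumask.h>

static int count_online_cpus(void)	/* hypothetical */
{
	int cpu, n = 0;

	get_online_cpus();	/* hold off hotplug: the online set is stable */
	for_each_online_cpu(cpu)
		n++;
	put_online_cpus();
	return n;
}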