diff options
Diffstat (limited to 'kernel/rcupdate.c')
| -rw-r--r-- | kernel/rcupdate.c | 41 |
1 files changed, 41 insertions, 0 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f1125c1a6321..49d808e833b0 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
| 46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
| 47 | #include <linux/kernel_stat.h> | 47 | #include <linux/kernel_stat.h> |
| 48 | #include <linux/hardirq.h> | ||
| 48 | 49 | ||
| 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 50 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 50 | static struct lock_class_key rcu_lock_key; | 51 | static struct lock_class_key rcu_lock_key; |
| @@ -66,6 +67,35 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | |||
| 66 | int rcu_scheduler_active __read_mostly; | 67 | int rcu_scheduler_active __read_mostly; |
| 67 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 68 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
| 68 | 69 | ||
| 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 71 | |||
| 72 | int debug_lockdep_rcu_enabled(void) | ||
| 73 | { | ||
| 74 | return rcu_scheduler_active && debug_locks && | ||
| 75 | current->lockdep_recursion == 0; | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); | ||
| 78 | |||
/**
 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
 *
 * Checks whether bottom halves are disabled, which covers both the
 * CONFIG_PROVE_RCU case and the non-lockdep case.  Note that if someone
 * calls rcu_read_lock_bh() but then re-enables BH, lockdep (if enabled)
 * will flag that situation on its own.
 *
 * When debug_lockdep_rcu_enabled() is false (e.g. during early boot,
 * or after a lockdep splat) we unconditionally report "held" to avoid
 * false positives.
 */
int rcu_read_lock_bh_held(void)
{
	return debug_lockdep_rcu_enabled() ? in_softirq() : 1;
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
| 96 | |||
| 97 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 98 | |||
| 69 | /* | 99 | /* |
| 70 | * This function is invoked towards the end of the scheduler's initialization | 100 | * This function is invoked towards the end of the scheduler's initialization |
| 71 | * process. Before this is called, the idle task might contain | 101 | * process. Before this is called, the idle task might contain |
| @@ -92,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
| 92 | rcu = container_of(head, struct rcu_synchronize, head); | 122 | rcu = container_of(head, struct rcu_synchronize, head); |
| 93 | complete(&rcu->completion); | 123 | complete(&rcu->completion); |
| 94 | } | 124 | } |
| 125 | |||
#ifdef CONFIG_PROVE_RCU
/*
 * Wrapper around thread_group_empty(current), exported so that code
 * which cannot pull in the headers declaring thread_group_empty()
 * (avoiding #include problems) can still perform the check.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */
