author     Steven Rostedt <srostedt@redhat.com>    2008-05-12 15:20:44 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-23 14:39:40 -0400
commit     0764d23cf066c52de42b653144605b481d3fbdbc
tree       940c6fb3f1fd674bdabbebc21a15b48a48e2ec24 /kernel
parent     361943ad0ba3f16e66859e30a408915e008ba91e
ftrace: lockdep notrace annotations
Add notrace annotations to lockdep to keep ftrace from recursing into
the lock tracing and debugging code.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
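The annotation works at the compiler level: with CONFIG_FTRACE the kernel is built with -pg, so gcc emits a call to mcount() at the entry of every instrumented function, and the tracer is invoked from there. Any function that the tracer's own code path reaches, lockdep's entry points included, would therefore re-enter the tracer. Below is a minimal sketch of what notrace means, assuming the mcount-based instrumentation and roughly the compiler.h definition of this era; the helper functions are made up for illustration.

/* Roughly what include/linux/compiler.h provided at the time. */
#define notrace __attribute__((no_instrument_function))

/*
 * Built with -pg, gcc adds a "call mcount" at the entry of this
 * function, so the function tracer sees every call to it.
 */
void traced_helper(void)
{
}

/*
 * With the attribute, no mcount call is emitted, so the tracer can
 * reach this function without re-entering itself.  This is what the
 * patch does for lockdep_off()/lockdep_on(), lock_acquire(),
 * lock_release() and the internal lockdep helpers on those paths.
 */
notrace void untraced_helper(void)
{
}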
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c    23
-rw-r--r--  kernel/spinlock.c    2
2 files changed, 13 insertions, 12 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e21924365ea3..ac46847ba0c9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -271,14 +271,14 @@ static struct list_head chainhash_table[CHAINHASH_SIZE];
 	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 	(key2))
 
-void lockdep_off(void)
+notrace void lockdep_off(void)
 {
 	current->lockdep_recursion++;
 }
 
 EXPORT_SYMBOL(lockdep_off);
 
-void lockdep_on(void)
+notrace void lockdep_on(void)
 {
 	current->lockdep_recursion--;
 }
@@ -1041,7 +1041,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
  * Return 1 otherwise and keep <backwards_match> unchanged.
  * Return 0 on error.
  */
-static noinline int
+static noinline notrace int
 find_usage_backwards(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
@@ -1591,7 +1591,7 @@ static inline int validate_chain(struct task_struct *curr,
  * We are building curr_chain_key incrementally, so double-check
  * it from scratch, to make sure that it's done correctly:
  */
-static void check_chain_key(struct task_struct *curr)
+static notrace void check_chain_key(struct task_struct *curr)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
 	struct held_lock *hlock, *prev_hlock = NULL;
@@ -1967,7 +1967,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 /*
  * Mark all held locks with a usage bit:
  */
-static int
+static notrace int
 mark_held_locks(struct task_struct *curr, int hardirq)
 {
 	enum lock_usage_bit usage_bit;
@@ -2260,8 +2260,8 @@ static inline int separate_irq_context(struct task_struct *curr,
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
-		     enum lock_usage_bit new_bit)
+static notrace int mark_lock(struct task_struct *curr, struct held_lock *this,
+			     enum lock_usage_bit new_bit)
 {
 	unsigned int new_mask = 1 << new_bit, ret = 1;
 
@@ -2663,7 +2663,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static void check_flags(unsigned long flags)
+static notrace void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
 	if (!debug_locks)
@@ -2700,8 +2700,8 @@ static void check_flags(unsigned long flags)
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
-void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-		  int trylock, int read, int check, unsigned long ip)
+notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+			  int trylock, int read, int check, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2723,7 +2723,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+notrace void lock_release(struct lockdep_map *lock, int nested,
+			  unsigned long ip)
 {
 	unsigned long flags;
 
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index ae28c8245123..a1fb54c93cdd 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -436,7 +436,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
 
-int in_lock_functions(unsigned long addr)
+notrace int in_lock_functions(unsigned long addr)
 {
 	/* Linker adds these: start and end of __lockfunc functions */
 	extern char __lock_text_start[], __lock_text_end[];
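in_lock_functions() is a small classifier: it reports whether an instruction address falls inside the __lockfunc text section that the linker brackets with the symbols visible in the hunk above, so callers such as the profiling code can attribute an address past the spinlock wrappers. Marking it notrace keeps a call to it from dragging the tracer back into itself. A sketch of the range check it performs, assuming only those two linker symbols (the body itself is unchanged by this patch):

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	/* True when addr points into the __lockfunc text section. */
	return addr >= (unsigned long)__lock_text_start &&
	       addr < (unsigned long)__lock_text_end;
}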