author:    Steven Rostedt <rostedt@goodmis.org>    2008-05-12 15:20:55 -0400
committer: Thomas Gleixner <tglx@linutronix.de>    2008-05-23 15:15:14 -0400
commit:    1d09daa55d2e9bab7e7d30f0d05e5a7bc60b2a4a
tree:      81b138d50b84e052d49f7710ce03250cde1b3606 /kernel/lockdep.c
parent:    9d0a420b737f72d84fabebf29634d800cbf54538
ftrace: use Makefile to remove tracing from lockdep
This patch removes the "notrace" annotation from lockdep and adds the
debugging files in the kernel directory to those that should not be
compiled with "-pg" mcount tracing.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
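The kernel/Makefile half of the change is not visible here, since the diffstat is limited to 'kernel/lockdep.c'; the hunks below only show the per-function "notrace" annotations (the kernel's wrapper for __attribute__((no_instrument_function))) being removed. The mechanism the message refers to is kbuild's per-object CFLAGS_REMOVE_<object>.o variable, which drops the listed flags when compiling that one file, so gcc never emits mcount calls for it. A minimal sketch of such a stanza, assuming the CONFIG_FTRACE guard of this era and an illustrative file list, not the literal hunk from this commit:

    # kernel/Makefile (sketch): build the lock-debugging objects without
    # -pg when the function tracer is enabled, so no mcount call sites
    # are generated for them at all.
    ifdef CONFIG_FTRACE
    CFLAGS_REMOVE_lockdep.o = -pg
    CFLAGS_REMOVE_mutex-debug.o = -pg
    CFLAGS_REMOVE_rtmutex-debug.o = -pg
    endif

Compared with annotating every function "notrace", stripping -pg for the whole translation unit also covers functions added later, and the result can be verified by checking the object for mcount references (e.g. with objdump).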
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ac46847ba0c9..90a440cbd6de 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -271,14 +271,14 @@ static struct list_head chainhash_table[CHAINHASH_SIZE];
 	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
 	(key2))
 
-notrace void lockdep_off(void)
+void lockdep_off(void)
 {
 	current->lockdep_recursion++;
 }
 
 EXPORT_SYMBOL(lockdep_off);
 
-notrace void lockdep_on(void)
+void lockdep_on(void)
 {
 	current->lockdep_recursion--;
 }
@@ -1041,7 +1041,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
  * Return 1 otherwise and keep <backwards_match> unchanged.
  * Return 0 on error.
  */
-static noinline notrace int
+static noinline int
 find_usage_backwards(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
@@ -1591,7 +1591,7 @@ static inline int validate_chain(struct task_struct *curr,
  * We are building curr_chain_key incrementally, so double-check
  * it from scratch, to make sure that it's done correctly:
  */
-static notrace void check_chain_key(struct task_struct *curr)
+static void check_chain_key(struct task_struct *curr)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
 	struct held_lock *hlock, *prev_hlock = NULL;
@@ -1967,7 +1967,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 /*
  * Mark all held locks with a usage bit:
  */
-static notrace int
+static int
 mark_held_locks(struct task_struct *curr, int hardirq)
 {
 	enum lock_usage_bit usage_bit;
@@ -2014,7 +2014,7 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void notrace trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 	unsigned long ip;
@@ -2060,7 +2060,7 @@ void notrace trace_hardirqs_on_caller(unsigned long a0)
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-void notrace trace_hardirqs_on(void)
+void trace_hardirqs_on(void)
 {
 	trace_hardirqs_on_caller(CALLER_ADDR0);
 }
@@ -2069,7 +2069,7 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void notrace trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 
@@ -2094,7 +2094,7 @@ void notrace trace_hardirqs_off_caller(unsigned long a0)
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
-void notrace trace_hardirqs_off(void)
+void trace_hardirqs_off(void)
 {
 	trace_hardirqs_off_caller(CALLER_ADDR0);
 }
@@ -2260,7 +2260,7 @@ static inline int separate_irq_context(struct task_struct *curr,
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
-static notrace int mark_lock(struct task_struct *curr, struct held_lock *this,
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		      enum lock_usage_bit new_bit)
 {
 	unsigned int new_mask = 1 << new_bit, ret = 1;
@@ -2663,7 +2663,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static notrace void check_flags(unsigned long flags)
+static void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
 	if (!debug_locks)
@@ -2700,7 +2700,7 @@ static notrace void check_flags(unsigned long flags)
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
-notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, unsigned long ip)
 {
 	unsigned long flags;
@@ -2723,7 +2723,7 @@ notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-notrace void lock_release(struct lockdep_map *lock, int nested,
+void lock_release(struct lockdep_map *lock, int nested,
 			  unsigned long ip)
 {
 	unsigned long flags;