Diffstat (limited to 'kernel/trace/trace_stack.c')

 kernel/trace/trace_stack.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)
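Taken together, the hunks below make three self-contained changes to the stack tracer: check_stack() and stack_max_size_write() switch from raw_local_irq_save()/raw_local_irq_restore() to plain local_irq_save()/local_irq_restore() around max_stack_lock; the index-recording loop in check_stack() gains a found flag so an entry is only skipped when it was not matched on the stack; and stack_trace_call() replaces its open-coded preemption handling with the ftrace_preempt_disable()/ftrace_preempt_enable() helper pair.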
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3bdb44bde4b7..0b863f2cbc8e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void)
 	if (!object_is_on_stack(&this_size))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
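A note on the irq-flags change above (and the matching one in stack_max_size_write() below): max_stack_lock is taken with __raw_spin_lock(), which lockdep does not track, but the surrounding irq-disable is still relevant to lockdep's irqs-off tracking. The raw_local_irq_save()/raw_local_irq_restore() variants bypass that tracking and can confuse lockdep, so the plain local_irq_save()/local_irq_restore() keep lockdep's picture accurate; the raw spinlock calls themselves are unchanged.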
@@ -78,6 +78,7 @@ static inline void check_stack(void)
 	 * on a new max, so it is far from a fast path.
 	 */
 	while (i < max_stack_trace.nr_entries) {
+		int found = 0;
 
 		stack_dump_index[i] = this_size;
 		p = start;
@@ -86,17 +87,19 @@ static inline void check_stack(void)
 			if (*p == stack_dump_trace[i]) {
 				this_size = stack_dump_index[i++] =
 					(top - p) * sizeof(unsigned long);
+				found = 1;
 				/* Start the search from here */
 				start = p + 1;
 			}
 		}
 
-		i++;
+		if (!found)
+			i++;
 	}
 
  out:
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static void
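Why the found flag matters: inside the scan, a hit on *p == stack_dump_trace[i] already consumes the entry through the embedded increment in stack_dump_index[i++]. The old unconditional i++ at the bottom of the while loop then advanced past a second, never-searched entry, shifting the recorded sizes; that bottom increment is only meant to skip entries that never appear on the stack. A minimal user-space sketch of the corrected loop (hypothetical data and names, not the kernel code) that compiles and runs:

    /*
     * Stand-alone sketch of the corrected loop; data and names are
     * hypothetical, only the control flow mirrors check_stack().
     */
    #include <stdio.h>

    #define NR_ENTRIES 3

    int main(void)
    {
        unsigned long trace[NR_ENTRIES] = { 0xa0, 0xb0, 0xc0 }; /* traced return addresses */
        unsigned long stack[] = { 0xa0, 0x11, 0xb0, 0xc0, 0x22 }; /* words on the stack */
        unsigned long *top = stack + sizeof(stack) / sizeof(stack[0]);
        unsigned long *start = stack;
        int this_size = (int)((top - start) * sizeof(unsigned long));
        int index[NR_ENTRIES];
        int i = 0;

        while (i < NR_ENTRIES) {
            int found = 0;
            unsigned long *p;

            index[i] = this_size;
            for (p = start; p < top && i < NR_ENTRIES; p++) {
                if (*p == trace[i]) {
                    /* a hit consumes entry i via the embedded i++ */
                    this_size = index[i++] =
                        (int)((top - p) * sizeof(unsigned long));
                    found = 1;
                    start = p + 1; /* resume the search here */
                }
            }

            /* skip the entry only if the scan did not consume it */
            if (!found)
                i++;
        }

        for (i = 0; i < NR_ENTRIES; i++)
            printf("entry %d: %d bytes\n", i, index[i]);
        return 0;
    }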
@@ -107,8 +110,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
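ftrace_preempt_disable() folds the removed two-line pattern into a helper shared by the tracers. It latches need_resched() before disabling preemption, so the matching enable knows whether a reschedule was already pending when the tracer ran. From memory of kernel/trace/trace.h in this series (check the tree for the authoritative version), it reads roughly:

    static inline int ftrace_preempt_disable(void)
    {
        int resched;

        /* record whether a reschedule was pending before we disable */
        resched = need_resched();
        preempt_disable_notrace();

        return resched;
    }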
@@ -120,10 +122,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
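ftrace_preempt_enable() is the counterpart, again roughly as in kernel/trace/trace.h:

    static inline void ftrace_preempt_enable(int resched)
    {
        if (resched)
            /* reschedule was already pending: do not trigger it here */
            preempt_enable_no_resched_notrace();
        else
            preempt_enable_notrace();
    }

The asymmetry is the point of the "prevent recursion in schedule" comment above: if NEED_RESCHED was already set when the tracer disabled preemption, the tracer may have been invoked from within the scheduler itself, and re-enabling with the variant that can call schedule() could recurse. The no_resched variant leaves the pending reschedule to the interrupted context.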
@@ -166,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&max_stack_lock);
 	*ptr = val;
 	__raw_spin_unlock(&max_stack_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return count;
 }
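The final hunk is the same raw_local_irq_save()/raw_local_irq_restore() to local_irq_save()/local_irq_restore() conversion as in check_stack(), applied to the max_stack_lock critical section in stack_max_size_write(); see the note under the first hunk.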
