diff options
| author | Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> | 2008-12-29 00:47:18 -0500 |
|---|---|---|
| committer | Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> | 2008-12-29 00:47:18 -0500 |
| commit | 0a8c5395f90f06d128247844b2515c8bf3f2826b (patch) | |
| tree | d95382dcdfa303b99d480c01763d6cb6767fdaca /kernel/trace/trace_stack.c | |
| parent | 25051158bbed127e8672b43396c71c5eb610e5f1 (diff) | |
| parent | 3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff) | |
[XFS] Fix merge failures
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
fs/xfs/linux-2.6/xfs_cred.h
fs/xfs/linux-2.6/xfs_globals.h
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/xfs_vnodeops.h
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Diffstat (limited to 'kernel/trace/trace_stack.c')
| -rw-r--r-- | kernel/trace/trace_stack.c | 70 |
1 file changed, 55 insertions, 15 deletions
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 3bdb44bde4b7..d0871bc0aca5 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/debugfs.h> | 10 | #include <linux/debugfs.h> |
| 11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <linux/sysctl.h> | ||
| 13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 14 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 15 | #include "trace.h" | 16 | #include "trace.h" |
| @@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock = | |||
| 31 | 32 | ||
| 32 | static int stack_trace_disabled __read_mostly; | 33 | static int stack_trace_disabled __read_mostly; |
| 33 | static DEFINE_PER_CPU(int, trace_active); | 34 | static DEFINE_PER_CPU(int, trace_active); |
| 35 | static DEFINE_MUTEX(stack_sysctl_mutex); | ||
| 36 | |||
| 37 | int stack_tracer_enabled; | ||
| 38 | static int last_stack_tracer_enabled; | ||
| 34 | 39 | ||
| 35 | static inline void check_stack(void) | 40 | static inline void check_stack(void) |
| 36 | { | 41 | { |
| @@ -48,7 +53,7 @@ static inline void check_stack(void) | |||
| 48 | if (!object_is_on_stack(&this_size)) | 53 | if (!object_is_on_stack(&this_size)) |
| 49 | return; | 54 | return; |
| 50 | 55 | ||
| 51 | raw_local_irq_save(flags); | 56 | local_irq_save(flags); |
| 52 | __raw_spin_lock(&max_stack_lock); | 57 | __raw_spin_lock(&max_stack_lock); |
| 53 | 58 | ||
| 54 | /* a race could have already updated it */ | 59 | /* a race could have already updated it */ |
| @@ -78,6 +83,7 @@ static inline void check_stack(void) | |||
| 78 | * on a new max, so it is far from a fast path. | 83 | * on a new max, so it is far from a fast path. |
| 79 | */ | 84 | */ |
| 80 | while (i < max_stack_trace.nr_entries) { | 85 | while (i < max_stack_trace.nr_entries) { |
| 86 | int found = 0; | ||
| 81 | 87 | ||
| 82 | stack_dump_index[i] = this_size; | 88 | stack_dump_index[i] = this_size; |
| 83 | p = start; | 89 | p = start; |
| @@ -86,17 +92,19 @@ static inline void check_stack(void) | |||
| 86 | if (*p == stack_dump_trace[i]) { | 92 | if (*p == stack_dump_trace[i]) { |
| 87 | this_size = stack_dump_index[i++] = | 93 | this_size = stack_dump_index[i++] = |
| 88 | (top - p) * sizeof(unsigned long); | 94 | (top - p) * sizeof(unsigned long); |
| 95 | found = 1; | ||
| 89 | /* Start the search from here */ | 96 | /* Start the search from here */ |
| 90 | start = p + 1; | 97 | start = p + 1; |
| 91 | } | 98 | } |
| 92 | } | 99 | } |
| 93 | 100 | ||
| 94 | i++; | 101 | if (!found) |
| 102 | i++; | ||
| 95 | } | 103 | } |
| 96 | 104 | ||
| 97 | out: | 105 | out: |
| 98 | __raw_spin_unlock(&max_stack_lock); | 106 | __raw_spin_unlock(&max_stack_lock); |
| 99 | raw_local_irq_restore(flags); | 107 | local_irq_restore(flags); |
| 100 | } | 108 | } |
| 101 | 109 | ||
| 102 | static void | 110 | static void |
| @@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 107 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
| 108 | return; | 116 | return; |
| 109 | 117 | ||
| 110 | resched = need_resched(); | 118 | resched = ftrace_preempt_disable(); |
| 111 | preempt_disable_notrace(); | ||
| 112 | 119 | ||
| 113 | cpu = raw_smp_processor_id(); | 120 | cpu = raw_smp_processor_id(); |
| 114 | /* no atomic needed, we only modify this variable by this cpu */ | 121 | /* no atomic needed, we only modify this variable by this cpu */ |
| @@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 120 | out: | 127 | out: |
| 121 | per_cpu(trace_active, cpu)--; | 128 | per_cpu(trace_active, cpu)--; |
| 122 | /* prevent recursion in schedule */ | 129 | /* prevent recursion in schedule */ |
| 123 | if (resched) | 130 | ftrace_preempt_enable(resched); |
| 124 | preempt_enable_no_resched_notrace(); | ||
| 125 | else | ||
| 126 | preempt_enable_notrace(); | ||
| 127 | } | 131 | } |
| 128 | 132 | ||
| 129 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
| @@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
| 166 | if (ret < 0) | 170 | if (ret < 0) |
| 167 | return ret; | 171 | return ret; |
| 168 | 172 | ||
| 169 | raw_local_irq_save(flags); | 173 | local_irq_save(flags); |
| 170 | __raw_spin_lock(&max_stack_lock); | 174 | __raw_spin_lock(&max_stack_lock); |
| 171 | *ptr = val; | 175 | *ptr = val; |
| 172 | __raw_spin_unlock(&max_stack_lock); | 176 | __raw_spin_unlock(&max_stack_lock); |
| 173 | raw_local_irq_restore(flags); | 177 | local_irq_restore(flags); |
| 174 | 178 | ||
| 175 | return count; | 179 | return count; |
| 176 | } | 180 | } |
| 177 | 181 | ||
| 178 | static struct file_operations stack_max_size_fops = { | 182 | static const struct file_operations stack_max_size_fops = { |
| 179 | .open = tracing_open_generic, | 183 | .open = tracing_open_generic, |
| 180 | .read = stack_max_size_read, | 184 | .read = stack_max_size_read, |
| 181 | .write = stack_max_size_write, | 185 | .write = stack_max_size_write, |
| @@ -273,7 +277,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 273 | return 0; | 277 | return 0; |
| 274 | } | 278 | } |
| 275 | 279 | ||
| 276 | static struct seq_operations stack_trace_seq_ops = { | 280 | static const struct seq_operations stack_trace_seq_ops = { |
| 277 | .start = t_start, | 281 | .start = t_start, |
| 278 | .next = t_next, | 282 | .next = t_next, |
| 279 | .stop = t_stop, | 283 | .stop = t_stop, |
| @@ -289,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file) | |||
| 289 | return ret; | 293 | return ret; |
| 290 | } | 294 | } |
| 291 | 295 | ||
| 292 | static struct file_operations stack_trace_fops = { | 296 | static const struct file_operations stack_trace_fops = { |
| 293 | .open = stack_trace_open, | 297 | .open = stack_trace_open, |
| 294 | .read = seq_read, | 298 | .read = seq_read, |
| 295 | .llseek = seq_lseek, | 299 | .llseek = seq_lseek, |
| 296 | }; | 300 | }; |
| 297 | 301 | ||
| 302 | int | ||
| 303 | stack_trace_sysctl(struct ctl_table *table, int write, | ||
| 304 | struct file *file, void __user *buffer, size_t *lenp, | ||
| 305 | loff_t *ppos) | ||
| 306 | { | ||
| 307 | int ret; | ||
| 308 | |||
| 309 | mutex_lock(&stack_sysctl_mutex); | ||
| 310 | |||
| 311 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | ||
| 312 | |||
| 313 | if (ret || !write || | ||
| 314 | (last_stack_tracer_enabled == stack_tracer_enabled)) | ||
| 315 | goto out; | ||
| 316 | |||
| 317 | last_stack_tracer_enabled = stack_tracer_enabled; | ||
| 318 | |||
| 319 | if (stack_tracer_enabled) | ||
| 320 | register_ftrace_function(&trace_ops); | ||
| 321 | else | ||
| 322 | unregister_ftrace_function(&trace_ops); | ||
| 323 | |||
| 324 | out: | ||
| 325 | mutex_unlock(&stack_sysctl_mutex); | ||
| 326 | return ret; | ||
| 327 | } | ||
| 328 | |||
| 329 | static __init int enable_stacktrace(char *str) | ||
| 330 | { | ||
| 331 | stack_tracer_enabled = 1; | ||
| 332 | last_stack_tracer_enabled = 1; | ||
| 333 | return 1; | ||
| 334 | } | ||
| 335 | __setup("stacktrace", enable_stacktrace); | ||
| 336 | |||
| 298 | static __init int stack_trace_init(void) | 337 | static __init int stack_trace_init(void) |
| 299 | { | 338 | { |
| 300 | struct dentry *d_tracer; | 339 | struct dentry *d_tracer; |
| @@ -312,7 +351,8 @@ static __init int stack_trace_init(void) | |||
| 312 | if (!entry) | 351 | if (!entry) |
| 313 | pr_warning("Could not create debugfs 'stack_trace' entry\n"); | 352 | pr_warning("Could not create debugfs 'stack_trace' entry\n"); |
| 314 | 353 | ||
| 315 | register_ftrace_function(&trace_ops); | 354 | if (stack_tracer_enabled) |
| 355 | register_ftrace_function(&trace_ops); | ||
| 316 | 356 | ||
| 317 | return 0; | 357 | return 0; |
| 318 | } | 358 | } |
