Diffstat (limited to 'kernel/trace')

 -rw-r--r--  kernel/trace/ring_buffer.c            | 16
 -rw-r--r--  kernel/trace/trace.c                  | 68
 -rw-r--r--  kernel/trace/trace.h                  |  2
 -rw-r--r--  kernel/trace/trace_clock.c            |  8
 -rw-r--r--  kernel/trace/trace_functions_graph.c  |  4
 -rw-r--r--  kernel/trace/trace_hw_branches.c      | 51
 -rw-r--r--  kernel/trace/trace_kprobe.c           | 31
 -rw-r--r--  kernel/trace/trace_sched_wakeup.c     | 16
 -rw-r--r--  kernel/trace/trace_selftest.c         |  4
 -rw-r--r--  kernel/trace/trace_stack.c            | 16
 -rw-r--r--  kernel/trace/trace_sysprof.c          |  1

 11 files changed, 119 insertions, 98 deletions
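Nearly all of the churn below is one mechanical rename: the lowest-level lock type raw_spinlock_t becomes arch_spinlock_t, and its __raw_spin_*() operations become arch_spin_*(), freeing the raw_* namespace for other use. A minimal sketch of the calling convention the converted sites follow (demo_lock and demo() are illustrative names, not part of this patch); arch spinlocks do no lockdep tracking and never touch interrupt or preemption state, so every call site handles that itself:

        static arch_spinlock_t demo_lock =
                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        static void demo(void)
        {
                unsigned long flags;

                /* arch_spin_lock() will not disable interrupts for us */
                local_irq_save(flags);
                arch_spin_lock(&demo_lock);
                /* ... critical section ... */
                arch_spin_unlock(&demo_lock);
                local_irq_restore(flags);
        }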
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index eccb4cf1e998..2326b04c95c4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -2827,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2916,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3279,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3401,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d0a4c12d1f1c..0df1b0f2cb9e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int			tracer_enabled = 1;
@@ -492,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
@@ -554,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -580,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -602,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -801,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -914,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -939,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -957,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1084,7 +1084,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1266,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1298,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1319,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1349,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1375,7 +1375,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2309,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -3122,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3612,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -4279,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4290,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4365,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
  out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4426,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
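Besides the lock rename, trace.c switches the per-CPU ftrace_cpu_disabled counter from a local_t manipulated with local_inc()/local_dec() to a plain int updated with __this_cpu_inc()/__this_cpu_dec(); every caller already runs with preemption disabled, so the atomic local_t is unnecessary overhead. A rough before/after sketch (my_count is a hypothetical name; per_cpu_var() is the symbol-prefix wrapper this kernel generation uses):

        DEFINE_PER_CPU(int, my_count);  /* was: DEFINE_PER_CPU(local_t, my_count) */

        static void my_count_inc(void)
        {
                /* callers already hold preemption off, as here */
                preempt_disable();
                /* was: local_inc(&__get_cpu_var(my_count)) */
                __this_cpu_inc(per_cpu_var(my_count));
                preempt_enable();
        }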
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1b18cb240c16..4df6a77eb196 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	__raw_spin_lock(&trace_clock_struct.lock);
+	arch_spin_lock(&trace_clock_struct.lock);
 
 	/*
 	 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
 
 	trace_clock_struct.prev_time = now;
 
-	__raw_spin_unlock(&trace_clock_struct.lock);
+	arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
 	raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a..b1342c5d37cf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5..7b97000745f5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-	per_cpu(tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-				   NULL, (size_t)-1, BTS_KERNEL);
+	per_cpu(hwb_tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+				   BTS_KERNEL);
 
-	if (IS_ERR(per_cpu(tracer, cpu)))
-		per_cpu(tracer, cpu) = NULL;
+	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+		per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu) {
 		bts_trace_init_cpu(cpu);
 
-		if (likely(per_cpu(tracer, cpu)))
+		if (likely(per_cpu(hwb_tracer, cpu)))
 			trace_hw_branches_enabled = 1;
 	}
 	trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 	trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 0;
 	put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 1;
 	put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 			bts_trace_init_cpu(cpu);
 
 			if (trace_hw_branches_suspended &&
-			    likely(per_cpu(tracer, cpu)))
-				ds_suspend_bts(per_cpu(tracer, cpu));
+			    likely(per_cpu(hwb_tracer, cpu)))
+				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 		}
 		break;
 
 	case CPU_DOWN_PREPARE:
 		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	/*
 	 * We need to collect the trace on the respective cpu since ftrace
 	 * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	put_online_cpus();
 }
 
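The tracer/buffer to hwb_tracer/hwb_buffer renames above follow the same motivation as max_data to max_tr_data in trace.c: with the per_cpu__ symbol prefix apparently being phased out in this tree, a per-CPU variable occupies its plain name in the global symbol namespace, so overly generic names invite collisions. An illustrative sketch of the problem (an assumption about the ongoing per-cpu rework, not text from this patch):

        /* old: claims the bare symbol "tracer" once the prefix is gone */
        static DEFINE_PER_CPU(struct bts_tracer *, tracer);

        /* new: prefixed with the subsystem name, collision-free */
        static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);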
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 83f1e6ef7063..6ea90c0e2c96 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -282,6 +282,18 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 				struct pt_regs *regs);
 
+/* Check the name is good for event/group */
+static int check_event_name(const char *name)
+{
+	if (!isalpha(*name) && *name != '_')
+		return 0;
+	while (*++name != '\0') {
+		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+			return 0;
+	}
+	return 1;
+}
+
 /*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
@@ -293,10 +305,11 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 					     int nargs, int is_return)
 {
 	struct trace_probe *tp;
+	int ret = -ENOMEM;
 
 	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
 	if (!tp)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 
 	if (symbol) {
 		tp->symbol = kstrdup(symbol, GFP_KERNEL);
@@ -312,14 +325,20 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 	else
 		tp->rp.kp.pre_handler = kprobe_dispatcher;
 
-	if (!event)
+	if (!event || !check_event_name(event)) {
+		ret = -EINVAL;
 		goto error;
+	}
+
 	tp->call.name = kstrdup(event, GFP_KERNEL);
 	if (!tp->call.name)
 		goto error;
 
-	if (!group)
+	if (!group || !check_event_name(group)) {
+		ret = -EINVAL;
 		goto error;
+	}
+
 	tp->call.system = kstrdup(group, GFP_KERNEL);
 	if (!tp->call.system)
 		goto error;
@@ -330,7 +349,7 @@ error:
 	kfree(tp->call.name);
 	kfree(tp->symbol);
 	kfree(tp);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 
 static void free_probe_arg(struct probe_arg *arg)
@@ -695,10 +714,10 @@ static int create_trace_probe(int argc, char **argv)
 	if (!event) {
 		/* Make a new event name */
 		if (symbol)
-			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
+			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 				 is_return ? 'r' : 'p', symbol, offset);
 		else
-			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
+			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 				 is_return ? 'r' : 'p', addr);
 		event = buf;
 	}
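The new check_event_name() accepts exactly the C-identifier-like names, which is why the auto-generated event names above drop '@' and '+' in favor of '_'. A few hypothetical inputs and the value the function returns for them:

        check_event_name("p_do_fork_0");   /* 1: letters, digits and '_' only */
        check_event_name("_myevent");      /* 1: a leading '_' is allowed */
        check_event_name("1st_event");     /* 0: must not start with a digit */
        check_event_name("p@do_fork+0");   /* 0: '@' and '+' are rejected,
                                            * hence the snprintf format change */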
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 		goto out;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
 	if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 
 out_unlock:
 	__wakeup_reset(wakeup_trace);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 	__wakeup_reset(tr);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 }
 
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 		goto out;
 
 	/* interrupts should be off from try_to_wake_up */
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* check for races. */
 	if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	/* Don't allow flipping of max traces now */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	cnt = ring_buffer_entries(tr->buffer);
 
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 			break;
 	}
 	tracing_on();
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
 	if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
 		return;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
 	if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
 	}
 
  out:
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
 }
 
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
 
 	return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_enable();
 }
 
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index f6693969287d..a7974a552ca9 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -93,6 +93,7 @@ static const struct stacktrace_ops backtrace_ops = {
 	.warning_symbol		= backtrace_warning_symbol,
 	.stack			= backtrace_stack,
 	.address		= backtrace_address,
+	.walk_stack		= print_context_stack,
 };
 
 static int