Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c            | 16
-rw-r--r--  kernel/trace/trace.c                  | 62
-rw-r--r--  kernel/trace/trace.h                  |  2
-rw-r--r--  kernel/trace/trace_clock.c            |  8
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  4
-rw-r--r--  kernel/trace/trace_hw_branches.c      | 51
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     | 16
-rw-r--r--  kernel/trace/trace_selftest.c         |  4
-rw-r--r--  kernel/trace/trace_stack.c            | 16
9 files changed, 90 insertions(+), 89 deletions(-)
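Note before the per-file hunks: nearly every change below is the same mechanical conversion, renaming the low-level raw_spinlock_t / __raw_spin_*() primitives to arch_spinlock_t / arch_spin_*(). The snippet that follows is an illustrative sketch of the resulting pattern, not code taken from the patch; the lock and function names are made up. The point is only that callers still manage interrupts themselves, because arch_spin_lock() does not do it for them.

	/* Illustrative sketch only -- not part of this patch. */
	static arch_spinlock_t example_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static void example_critical_section(void)
	{
		unsigned long flags;

		/* arch_spin_lock() does not disable interrupts for the caller */
		local_irq_save(flags);
		arch_spin_lock(&example_lock);
		/* ... touch the data protected by example_lock ... */
		arch_spin_unlock(&example_lock);
		local_irq_restore(flags);
	}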
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 again:
 	/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
 out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
 out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int			tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
 out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fa33cab6962..a52bed2eedd8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
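Aside from the lock rename, the trace.c and trace.h hunks above also retype the ftrace_cpu_disabled per-CPU counter from a local_t to a plain int and switch its accessors to __this_cpu_inc()/__this_cpu_dec()/__this_cpu_read(). A minimal sketch of that pattern follows, with hypothetical names, mirroring the per_cpu_var() accessor form used by the patch; it is illustrative only.

	/* Illustrative sketch only -- not part of this patch. */
	DEFINE_PER_CPU(int, example_cpu_disabled);

	static inline void example_disable_cpu(void)
	{
		/* preemption is disabled, so a plain int no longer needs local_t */
		preempt_disable();
		__this_cpu_inc(per_cpu_var(example_cpu_disabled));
	}

	static inline void example_enable_cpu(void)
	{
		__this_cpu_dec(per_cpu_var(example_cpu_disabled));
		preempt_enable();
	}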
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	__raw_spin_lock(&trace_clock_struct.lock);
+	arch_spin_lock(&trace_clock_struct.lock);
 
 	/*
 	 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
 
 	trace_clock_struct.prev_time = now;
 
-	__raw_spin_unlock(&trace_clock_struct.lock);
+	arch_spin_unlock(&trace_clock_struct.lock);
 
 out:
 	raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a..b1342c5d37cf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5..7b97000745f5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-	per_cpu(tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-				   NULL, (size_t)-1, BTS_KERNEL);
+	per_cpu(hwb_tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+				   BTS_KERNEL);
 
-	if (IS_ERR(per_cpu(tracer, cpu)))
-		per_cpu(tracer, cpu) = NULL;
+	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+		per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
 	for_each_online_cpu(cpu) {
 		bts_trace_init_cpu(cpu);
 
-		if (likely(per_cpu(tracer, cpu)))
+		if (likely(per_cpu(hwb_tracer, cpu)))
 			trace_hw_branches_enabled = 1;
 	}
 	trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 	trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 0;
 	put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	trace_hw_branches_suspended = 1;
 	put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 			bts_trace_init_cpu(cpu);
 
 			if (trace_hw_branches_suspended &&
-			    likely(per_cpu(tracer, cpu)))
-				ds_suspend_bts(per_cpu(tracer, cpu));
+			    likely(per_cpu(hwb_tracer, cpu)))
+				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 		}
 		break;
 
 	case CPU_DOWN_PREPARE:
 		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(tracer, cpu))) {
-			ds_release_bts(per_cpu(tracer, cpu));
-			per_cpu(tracer, cpu) = NULL;
+		if (likely(per_cpu(hwb_tracer, cpu))) {
+			ds_release_bts(per_cpu(hwb_tracer, cpu));
+			per_cpu(hwb_tracer, cpu) = NULL;
 		}
 	}
 
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_suspend_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 	/*
 	 * We need to collect the trace on the respective cpu since ftrace
 	 * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
 	for_each_online_cpu(cpu)
-		if (likely(per_cpu(tracer, cpu)))
-			ds_resume_bts(per_cpu(tracer, cpu));
+		if (likely(per_cpu(hwb_tracer, cpu)))
+			ds_resume_bts(per_cpu(hwb_tracer, cpu));
 	put_online_cpus();
 }
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 		goto out;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
 	if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 
 out_unlock:
 	__wakeup_reset(wakeup_trace);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 	__wakeup_reset(tr);
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 }
 
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 		goto out;
 
 	/* interrupts should be off from try_to_wake_up */
-	__raw_spin_lock(&wakeup_lock);
+	arch_spin_lock(&wakeup_lock);
 
 	/* check for races. */
 	if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
-	__raw_spin_unlock(&wakeup_lock);
+	arch_spin_unlock(&wakeup_lock);
 out:
 	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	/* Don't allow flipping of max traces now */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	cnt = ring_buffer_entries(tr->buffer);
 
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 			break;
 	}
 	tracing_on();
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
 	if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
 		return;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
 	if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
 	}
 
 out:
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
 }
 
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
 
 	return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_enable();
 }
 