Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63bc1cc38219..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
 out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
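The patch is a mechanical rename of the raw-spinlock primitives used by the tracer (__raw_spin_lock/_unlock/_trylock become arch_spin_lock/_unlock/_trylock); the surrounding interrupt and preemption handling in each caller is unchanged. A minimal sketch of the resulting call pattern follows; the includes, lock declaration, and function name in it are illustrative assumptions, not lines taken from this diff.

/*
 * Minimal sketch (not part of this patch) of the call pattern the hunks
 * above convert to. The lock declaration and names here are assumed for
 * illustration; trace.c declares its own arch_spinlock_t locks.
 */
#include <linux/irqflags.h>
#include <linux/spinlock.h>

static arch_spinlock_t demo_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	/*
	 * arch_spin_lock() is the low-level, arch-provided primitive: it
	 * does not disable interrupts or preemption by itself, so the call
	 * sites in this diff handle that separately (local_irq_save(),
	 * local_irq_disable(), or preempt_disable()).
	 */
	local_irq_save(flags);
	arch_spin_lock(&demo_lock);

	/* ... access the data protected by demo_lock ... */

	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}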