Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- kernel/trace/trace.c | 62 +++++++++++++++++++++++++++-------------------------------
 1 file changed, 31 insertions(+), 31 deletions(-)
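For context (inferred from the hunks below, since this page carries no commit message): the change is two mechanical conversions across kernel/trace/trace.c. First, the tracer's low-level locks move from the old raw_spinlock_t/__raw_spin_*() API to the renamed arch_spinlock_t/arch_spin_*() API; the raw_spinlock_t name was being freed up for the lockdep-aware raw locks. Second, the per-CPU ftrace_cpu_disabled counter drops its local_t type in favor of a plain int manipulated through the __this_cpu_*() operations, and the per-CPU max_data variable is renamed max_tr_data. The locking discipline itself is unchanged: callers still disable preemption or interrupts by hand around these bare locks.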
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
         preempt_disable();
-        local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-        local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
         preempt_enable();
 }
 
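The hunk above is the local_t-to-this_cpu conversion. A minimal sketch of the resulting idiom follows, with hypothetical names (demo_disabled, demo_enter, demo_exit) standing in for the real ones. Note that the per_cpu_var() wrapper seen in the diff was an artifact of the per-CPU symbol prefixing of that kernel generation and was dropped in later kernels, so the sketch uses the modern spelling.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical stand-in for ftrace_cpu_disabled: a plain int, not a local_t. */
DEFINE_PER_CPU(int, demo_disabled);

static inline void demo_enter(void)
{
        preempt_disable();              /* pin this task to the current CPU */
        __this_cpu_inc(demo_disabled);  /* one read-modify-write on this CPU's copy */
}

static inline void demo_exit(void)
{
        __this_cpu_dec(demo_disabled);
        preempt_enable();
}

The double-underscore __this_cpu_*() forms skip the preemption sanity checks of this_cpu_*() because the caller is expected to have preemption disabled already, which is exactly the situation in ftrace_disable_cpu()/ftrace_enable_cpu().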
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
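The comment in this hunk explains why ftrace_max_lock stays a bare lock: arch_spinlock_t (formerly raw_spinlock_t) is the raw architecture spinlock with no lockdep bookkeeping, so it stays cheap even when lockdep is enabled. The flip side is that it also does not disable preemption or interrupts itself, so every user must do that by hand, as update_max_tr() does with interrupts in the next hunk. A minimal sketch of the discipline, using a hypothetical demo_lock:

#include <linux/spinlock.h>
#include <linux/irqflags.h>

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_update(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* arch_spin_lock() won't do this for us */
        arch_spin_lock(&demo_lock);

        /* ... touch the state the lock protects ... */

        arch_spin_unlock(&demo_lock);
        local_irq_restore(flags);
}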
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
-        __raw_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&ftrace_max_lock);
 
         tr->buffer = max_tr.buffer;
         max_tr.buffer = buf;
 
         __update_max_tr(tr, tsk, cpu);
-        __raw_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
566 566
567/** 567/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
-        __raw_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&ftrace_max_lock);
 
         ftrace_disable_cpu();
 
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
         __update_max_tr(tr, tsk, cpu);
-        __raw_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
          * nor do we want to disable interrupts,
          * so if we miss here, then better luck next time.
          */
-        if (!__raw_spin_trylock(&trace_cmdline_lock))
+        if (!arch_spin_trylock(&trace_cmdline_lock))
                 return;
 
         idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-        __raw_spin_unlock(&trace_cmdline_lock);
+        arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
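trace_save_cmdline() runs on the scheduler's hot path, and the comment in the hunk above spells out the policy: never spin and never touch the interrupt state; if the lock is contended, skip this sample. A sketch of that best-effort pattern, again with a hypothetical demo_lock, assuming the caller already runs with preemption disabled (as trace_save_cmdline()'s callers appear to):

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_try_update(void)
{
        /* Contended? Better luck next time: drop the update, don't spin. */
        if (!arch_spin_trylock(&demo_lock))
                return;

        /* ... record the sample ... */

        arch_spin_unlock(&demo_lock);
}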
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
         }
 
         preempt_disable();
-        __raw_spin_lock(&trace_cmdline_lock);
+        arch_spin_lock(&trace_cmdline_lock);
         map = map_pid_to_cmdline[pid];
         if (map != NO_CMDLINE_MAP)
                 strcpy(comm, saved_cmdlines[map]);
         else
                 strcpy(comm, "<...>");
 
-        __raw_spin_unlock(&trace_cmdline_lock);
+        arch_spin_unlock(&trace_cmdline_lock);
         preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
         struct ftrace_entry *entry;
 
         /* If we are reading the ring buffer, don't trace */
-        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                 return;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-        static raw_spinlock_t trace_buf_lock =
-                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+        static arch_spinlock_t trace_buf_lock =
+                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
         static u32 trace_buf[TRACE_BUF_SIZE];
 
         struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
         /* Lockdep uses trace_printk for lock tracing */
         local_irq_save(flags);
-        __raw_spin_lock(&trace_buf_lock);
+        arch_spin_lock(&trace_buf_lock);
         len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
         if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-        __raw_spin_unlock(&trace_buf_lock);
+        arch_spin_unlock(&trace_buf_lock);
         local_irq_restore(flags);
 
 out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
                         unsigned long ip, const char *fmt, va_list args)
 {
-        static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+        static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
         static char trace_buf[TRACE_BUF_SIZE];
 
         struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
         pause_graph_tracing();
         raw_local_irq_save(irq_flags);
-        __raw_spin_lock(&trace_buf_lock);
+        arch_spin_lock(&trace_buf_lock);
         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
         size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
         ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
-        __raw_spin_unlock(&trace_buf_lock);
+        arch_spin_unlock(&trace_buf_lock);
         raw_local_irq_restore(irq_flags);
         unpause_graph_tracing();
  out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
         mutex_lock(&tracing_cpumask_update_lock);
 
         local_irq_disable();
-        __raw_spin_lock(&ftrace_max_lock);
+        arch_spin_lock(&ftrace_max_lock);
         for_each_tracing_cpu(cpu) {
                 /*
                  * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                         atomic_dec(&global_trace.data[cpu]->disabled);
                 }
         }
-        __raw_spin_unlock(&ftrace_max_lock);
+        arch_spin_unlock(&ftrace_max_lock);
         local_irq_enable();
 
         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-        static raw_spinlock_t ftrace_dump_lock =
-                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+        static arch_spinlock_t ftrace_dump_lock =
+                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
         /* use static because iter can be a bit big for the stack */
         static struct trace_iterator iter;
         unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
 
         /* only one dump */
         local_irq_save(flags);
-        __raw_spin_lock(&ftrace_dump_lock);
+        arch_spin_lock(&ftrace_dump_lock);
         if (dump_ran)
                 goto out;
 
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
         }
 
  out:
-        __raw_spin_unlock(&ftrace_dump_lock);
+        arch_spin_unlock(&ftrace_dump_lock);
         local_irq_restore(flags);
 }
 
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
         /* Allocate the first page for all buffers */
         for_each_tracing_cpu(i) {
                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-                max_tr.data[i] = &per_cpu(max_data, i);
+                max_tr.data[i] = &per_cpu(max_tr_data, i);
         }
 
         trace_init_cmdlines();