Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  88
1 file changed, 39 insertions(+), 49 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 73e67b68c53b..8e3f20a18a06 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
 }
 
 /**
- * trace_pid_filter_add_remove - Add or remove a task from a pid_list
+ * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
  * @pid_list: The list to modify
  * @self: The current task for fork or NULL for exit
  * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
 }
 
 /**
- * trace_snapshot - take a snapshot of the current buffer.
+ * tracing_snapshot - take a snapshot of the current buffer.
  *
  * This causes a swap between the snapshot buffer and the current live
  * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 
 /**
- * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
- * This is similar to trace_snapshot(), but it will allocate the
+ * This is similar to tracing_snapshot(), but it will allocate the
  * snapshot buffer if it isn't already allocated. Use this only
  * where it is safe to sleep, as the allocation may sleep.
  *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh;
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
+/*
+ * Skip 3:
+ *
+ *   trace_buffer_unlock_commit_regs()
+ *   trace_event_buffer_commit()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 3
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 	__buffer_unlock_commit(buffer, event);
 
 	/*
-	 * If regs is not set, then skip the following callers:
-	 *   trace_buffer_unlock_commit_regs
-	 *   event_trigger_unlock_commit
-	 *   trace_event_buffer_commit
-	 *   trace_event_raw_event_sched_switch
+	 * If regs is not set, then skip the necessary functions.
 	 * Note, we can still get here via blktrace, wakeup tracer
 	 * and mmiotrace, but that's ok if they lose a function or
-	 * two. They are that meaningful.
+	 * two. They are not that meaningful.
 	 */
-	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
 
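The two hunks above replace the magic skip count (4) with a named STACK_SKIP constant whose comment lists exactly which intermediate frames are dropped. A minimal userspace analog of the same idiom, using glibc's backtrace(3); the helper names capture_stack() and report_event() are hypothetical:

	#include <execinfo.h>

	/* Skip 2: capture_stack() itself and its report_event() wrapper. */
	#define STACK_SKIP 2

	static void capture_stack(void)
	{
		void *frames[32];
		int n = backtrace(frames, 32);

		/* Emit only the caller's frames, dropping the known wrappers. */
		if (n > STACK_SKIP)
			backtrace_symbols_fd(frames + STACK_SKIP, n - STACK_SKIP, 2);
	}

	static void report_event(void)
	{
		capture_stack();
	}

Keeping the count next to a comment naming the skipped functions makes it obvious what has to change when a wrapper is added or removed.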
@@ -2415,7 +2420,7 @@ trace_process_export(struct trace_export *export,
 
 	entry = ring_buffer_event_data(event);
 	size = ring_buffer_event_length(event);
-	export->write(entry, size);
+	export->write(export, entry, size);
 }
 
 static DEFINE_MUTEX(ftrace_export_lock);
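Passing the export itself into ->write() lets an implementation recover its containing object with container_of() instead of relying on globals. A hedged sketch of a consumer under the new signature; struct my_export and my_write() are hypothetical, while register_ftrace_export() is the real registration entry point:

	#include <linux/trace.h>
	#include <linux/kernel.h>
	#include <linux/atomic.h>

	struct my_export {
		struct trace_export export;
		atomic64_t bytes;	/* private per-exporter state */
	};

	static void my_write(struct trace_export *export, const void *entry,
			     unsigned int size)
	{
		/* The export argument leads back to our private state. */
		struct my_export *me = container_of(export, struct my_export, export);

		atomic64_add(size, &me->bytes);
		/* ... hand (entry, size) to a transport here ... */
	}

	static struct my_export my_export = {
		.export.write	= my_write,
	};

	/* in init code: register_ftrace_export(&my_export.export); */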
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	trace.skip = skip;
 
 	/*
-	 * Add two, for this function and the call to save_stack_trace()
+	 * Add one, for this function and the call to save_stack_trace()
 	 * If regs is set, then these functions will not be in the way.
 	 */
+#ifndef CONFIG_UNWINDER_ORC
 	if (!regs)
-		trace.skip += 2;
+		trace.skip++;
+#endif
 
 	/*
 	 * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
 
 	local_save_flags(flags);
 
-	/*
-	 * Skip 3 more, seems to get us at the caller of
-	 * this function.
-	 */
-	skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+	/* Skip 1 to skip this function. */
+	skip++;
+#endif
 	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
 			     flags, skip, preempt_count(), NULL);
 }
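With the ORC unwinder the captured trace starts at the caller, so trace_dump_stack() no longer needs to skip its own frame; the frame-pointer unwinder still records it, hence the #ifndef. Callers are unaffected either way. A hedged usage sketch; my_debug_hook() is hypothetical, trace_dump_stack() is the real API:

	/* Record who called us into the ftrace ring buffer. */
	static void my_debug_hook(void)
	{
		/*
		 * skip == 0 starts the dump at this function's frame;
		 * a positive skip drops that many additional frames.
		 */
		trace_dump_stack(0);
	}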
@@ -4178,37 +4184,30 @@ static const struct file_operations show_traces_fops = {
 	.llseek		= seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	char *mask_str;
 	int len;
 
-	mutex_lock(&tracing_cpumask_update_lock);
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
 
-	len = snprintf(mask_str, count, "%*pb\n",
+	len = snprintf(mask_str, len, "%*pb\n",
 		       cpumask_pr_args(tr->tracing_cpumask));
 	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 
 	return count;
 }
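The rewrite sizes the buffer with the standard two-pass snprintf() idiom: a call with a NULL buffer and zero size returns the length the output would need, so the allocation fits exactly and the fixed NR_CPUS-sized global (and the lock that guarded it) can go away. A userspace sketch of the same idiom; format_alloc() is a hypothetical helper:

	#include <stdio.h>
	#include <stdlib.h>

	static char *format_alloc(int value)
	{
		/* First pass: measure. +1 for the terminating NUL. */
		int len = snprintf(NULL, 0, "value=%d\n", value) + 1;
		char *buf = malloc(len);

		if (!buf)
			return NULL;
		/* Second pass: format into the exactly-sized buffer. */
		snprintf(buf, len, "value=%d\n", value);
		return buf;	/* caller frees */
	}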
@@ -4228,8 +4227,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	mutex_lock(&tracing_cpumask_update_lock);
-
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -4252,8 +4249,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
@@ -6780,7 +6775,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		.spd_release	= buffer_spd_release,
 	};
 	struct buffer_ref *ref;
-	int entries, size, i;
+	int entries, i;
 	ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -6834,14 +6829,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
 
-		/*
-		 * zero out any left over data, this is going to
-		 * user land.
-		 */
-		size = ring_buffer_page_len(ref->page);
-		if (size < PAGE_SIZE)
-			memset(ref->page + size, 0, PAGE_SIZE - size);
-
 		page = virt_to_page(ref->page);
 
 		spd.pages[i] = page;
@@ -7599,6 +7586,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 	buf->data = alloc_percpu(struct trace_array_cpu);
 	if (!buf->data) {
 		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -7622,7 +7610,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 					 allocate_snapshot ? size : 1);
 	if (WARN_ON(ret)) {
 		ring_buffer_free(tr->trace_buffer.buffer);
+		tr->trace_buffer.buffer = NULL;
 		free_percpu(tr->trace_buffer.data);
+		tr->trace_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;
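Both error paths now clear the pointers they free. If a freed pointer is left dangling, a later cleanup pass can free it a second time; resetting it to NULL turns that second free into a no-op. A userspace sketch of the same defensive pattern; struct res_pair is hypothetical, and free(NULL) is defined to do nothing:

	#include <stdlib.h>

	struct res_pair {
		void *buffer;
		void *data;
	};

	static void res_pair_free(struct res_pair *rp)
	{
		free(rp->buffer);
		rp->buffer = NULL;	/* safe against a repeated call */
		free(rp->data);
		rp->data = NULL;
	}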