Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                 105
-rw-r--r--  kernel/trace/ring_buffer.c             26
-rw-r--r--  kernel/trace/trace.c                  254
-rw-r--r--  kernel/trace/trace.h                   21
-rw-r--r--  kernel/trace/trace_event_perf.c        10
-rw-r--r--  kernel/trace/trace_events.c           292
-rw-r--r--  kernel/trace/trace_events_filter.c     21
-rw-r--r--  kernel/trace/trace_functions.c          2
-rw-r--r--  kernel/trace/trace_functions_graph.c   54
-rw-r--r--  kernel/trace/trace_kprobe.c            50
-rw-r--r--  kernel/trace/trace_mmiotrace.c          8
-rw-r--r--  kernel/trace/trace_output.c            14
-rw-r--r--  kernel/trace/trace_printk.c            19
-rw-r--r--  kernel/trace/trace_syscalls.c          26
-rw-r--r--  kernel/trace/trace_uprobe.c            53
15 files changed, 550 insertions(+), 405 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 67708f46baae..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/*
+	 * There's a small race when adding ops that the ftrace handler
+	 * that wants regs, may be called without them. We can not
+	 * allow that handler to be called if regs is NULL.
+	 */
+	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+		return 0;
+#endif
+
	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
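(A minimal sketch, not part of the patch, of an ftrace_ops user that sets
FTRACE_OPS_FL_SAVE_REGS, assuming the 3.x-era callback signature. The check
added above is what guarantees such a handler is never invoked with a NULL
regs pointer.)

	#include <linux/ftrace.h>
	#include <linux/module.h>

	/* Handler that relies on regs being valid */
	static void notrace my_regs_handler(unsigned long ip,
					    unsigned long parent_ip,
					    struct ftrace_ops *op,
					    struct pt_regs *regs)
	{
		/* Safe: ftrace_ops_test() filters out regs == NULL for us */
		pr_debug("traced %pS, pc=%lx\n",
			 (void *)ip, instruction_pointer(regs));
	}

	static struct ftrace_ops my_ops = {
		.func	= my_regs_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	static int __init my_example_init(void)
	{
		return register_ftrace_function(&my_ops);
	}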
@@ -2159,12 +2169,57 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
+
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
+
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+			cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2173,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
+	bool test = false;
	int i;
 
	/*
@@ -2186,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
 
		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
		}
	}
 
@@ -2198,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;
 
			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
			/*
			 * Do the initial record conversion from mcount jump
@@ -2223,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
@@ -3374,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
	return add_hash_entry(hash, ip);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
@@ -3416,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-	    && ftrace_enabled)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (!ret)
+		ftrace_ops_update_code(ops);
 
	mutex_unlock(&ftrace_lock);
 
@@ -3645,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
-		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-		    && ftrace_enabled)
-			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+		if (!ret)
+			ftrace_ops_update_code(iter->ops);
 
		mutex_unlock(&ftrace_lock);
	}
@@ -4218,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
	return 1;
 }
@@ -4241,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
	do_for_each_ftrace_op(op, ftrace_control_list) {
		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip))
+		    ftrace_ops_test(op, ip, regs))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
	trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip))
+		if (ftrace_ops_test(op, ip, regs))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e444ff88f0a4..cc2f66f68dc5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
 {
	int ret;
 
-	ret = trace_seq_printf(s, "# compressed entry header\n");
-	ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
-	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
-	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
-	ret = trace_seq_printf(s, "\n");
+	ret = trace_seq_puts(s, "# compressed entry header\n");
+	ret = trace_seq_puts(s, "\ttype_len : 5 bits\n");
+	ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n");
+	ret = trace_seq_puts(s, "\tarray : 32 bits\n");
+	ret = trace_seq_putc(s, '\n');
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 /**
- * check_pages - integrity check of buffer pages
+ * rb_check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
  * As a safety measure we check to make sure the data pages have not
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self,
 #endif
 
 /**
- * ring_buffer_alloc - allocate a new ring_buffer
+ * __ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work)
  * ring_buffer_resize - resize the ring buffer
  * @buffer: the buffer to resize.
  * @size: the new size.
+ * @cpu_id: the cpu buffer to resize
  *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * expected.
  *
  * After a sequence of ring_buffer_read_prepare calls, the user is
- * expected to make at least one call to ring_buffer_prepare_sync.
+ * expected to make at least one call to ring_buffer_read_prepare_sync.
  * Afterwards, ring_buffer_read_start is invoked to get things going
  * for real.
  *
- * This overall must be paired with ring_buffer_finish.
+ * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
  * an intervening ring_buffer_read_prepare_sync must have been
  * performed.
  *
- * Must be paired with ring_buffer_finish.
+ * Must be paired with ring_buffer_read_finish.
  */
 void
 ring_buffer_read_start(struct ring_buffer_iter *iter)
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
- * ring_buffer_finish - finish reading the iterator of the buffer
+ * ring_buffer_read_finish - finish reading the iterator of the buffer
  * @iter: The iterator retrieved by ring_buffer_start
  *
  * This re-enables the recording to the buffer, and frees the
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 /**
  * ring_buffer_alloc_read_page - allocate a page to read from buffer
  * @buffer: the buffer to allocate for.
+ * @cpu: the cpu buffer to allocate.
  *
  * This function is used in conjunction with ring_buffer_read_page.
  * When reading a full page from the ring buffer, these functions
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  * to swap with a page in the ring buffer.
  *
  * for example:
- *	rpage = ring_buffer_alloc_read_page(buffer);
+ *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
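(Sketch, not part of the patch: the reader pairing that the corrected
kernel-doc above refers to, with error handling reduced to the minimum.)

	#include <linux/ring_buffer.h>

	static void drain_cpu(struct ring_buffer *buffer, int cpu)
	{
		struct ring_buffer_iter *iter;
		struct ring_buffer_event *event;
		u64 ts;

		iter = ring_buffer_read_prepare(buffer, cpu);
		if (!iter)
			return;
		/* at least one sync call per batch of prepares */
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter);

		while ((event = ring_buffer_read(iter, &ts)) != NULL)
			; /* consume the event */

		/* pairs with ring_buffer_read_prepare()/read_start() */
		ring_buffer_read_finish(iter);
	}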
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0cd500bffd9b..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
	u64 ts;
 
	/* Early boot up does not have a buffer yet */
-	if (!global_trace.trace_buffer.buffer)
+	if (!buf->buffer)
		return trace_clock_local();
 
-	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
-	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+	ts = ring_buffer_time_stamp(buf->buffer, cpu);
+	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
	return ts;
 }
 
+cycle_t ftrace_now(int cpu)
+{
+	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
 /**
  * tracing_is_enabled - Show if global_trace has been disabled
  *
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
	/* Make sure all commits have finished */
	synchronize_sched();
 
-	buf->time_start = ftrace_now(buf->cpu);
+	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_current(int cpu)
-{
-	tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
+/* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
	struct trace_array *tr;
 
-	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
	}
-	mutex_unlock(&trace_types_lock);
 }
 
 #define SAVED_CMDLINES 128
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
	return 0;
 }
 
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+	if (inode->i_cdev) /* See trace_create_cpu_file() */
+		return (long)inode->i_cdev - 1;
+	return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
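(Aside, not part of the patch: the i_cdev trick above stores cpu + 1 so that
a NULL i_cdev, i.e. 0, still decodes to "no bound cpu". A hypothetical
round-trip, with bind_cpu()/bound_cpu() invented purely for illustration:)

	static inline void bind_cpu(struct inode *inode, long cpu)
	{
		inode->i_cdev = (void *)(cpu + 1);	/* non-NULL for cpu >= 0 */
	}

	static inline int bound_cpu(struct inode *inode)
	{
		if (inode->i_cdev)
			return (long)inode->i_cdev - 1;
		return RING_BUFFER_ALL_CPUS;		/* never bound */
	}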
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
-	       struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
+	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;
 
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
	iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
+	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
-	iter->cpu_file = tc->cpu;
 
	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
@@ -2971,45 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
	filp->private_data = inode->i_private;
 
	return 0;
-
-}
-
-static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
-{
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
-
-	if (tracing_disabled)
-		return -ENODEV;
-
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
-
-	filp->private_data = inode->i_private;
-
-	return 0;
-
 }
 
 static int tracing_release(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
-	struct trace_array *tr;
	int cpu;
 
-	/* Writes do not use seq_file, need to grab tr from inode */
	if (!(file->f_mode & FMODE_READ)) {
-		struct trace_cpu *tc = inode->i_private;
-
-		trace_array_put(tc->tr);
+		trace_array_put(tr);
		return 0;
	}
 
+	/* Writes do not use seq_file */
	iter = m->private;
-	tr = iter->tr;
-	trace_array_put(tr);
-
	mutex_lock(&trace_types_lock);
 
	for_each_tracing_cpu(cpu) {
@@ -3023,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file)
	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);
+
+	__trace_array_put(tr);
+
	mutex_unlock(&trace_types_lock);
 
	mutex_destroy(&iter->mutex);
@@ -3042,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
	return 0;
 }
 
-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
-{
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
-
-	trace_array_put(tr);
-	return 0;
-}
-
 static int tracing_single_release_tr(struct inode *inode, struct file *file)
 {
	struct trace_array *tr = inode->i_private;
@@ -3062,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
 
 static int tracing_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;
 
@@ -3071,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
		return -ENODEV;
 
	/* If this file was open for write, then erase contents */
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC)) {
-		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		int cpu = tracing_get_cpu(inode);
+
+		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
-			tracing_reset(&tr->trace_buffer, tc->cpu);
+			tracing_reset(&tr->trace_buffer, cpu);
	}
 
	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(tr, tc, inode, file, false);
+		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3447,6 +3428,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
	struct trace_array *tr = inode->i_private;
+	int ret;
 
	if (tracing_disabled)
		return -ENODEV;
@@ -3454,7 +3436,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
	if (trace_array_get(tr) < 0)
		return -ENODEV;
 
-	return single_open(file, tracing_trace_options_show, inode->i_private);
+	ret = single_open(file, tracing_trace_options_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static const struct file_operations tracing_iter_fops = {
@@ -3537,14 +3523,14 @@ static const char readme_msg[] =
	"\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
	"\t\t\t Read the contents for more information\n"
 #endif
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
 #endif
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACK_TRACER */
 ;
 
 static ssize_t
@@ -3941,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;
 
@@ -3958,6 +3943,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
+		__trace_array_put(tr);
		goto out;
	}
 
@@ -3987,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = tc->cpu;
-	iter->tr = tc->tr;
-	iter->trace_buffer = &tc->tr->trace_buffer;
+	iter->tr = tr;
+	iter->trace_buffer = &tr->trace_buffer;
+	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;
 
@@ -4012,8 +3998,7 @@ fail:
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
	struct trace_iterator *iter = file->private_data;
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 
	mutex_lock(&trace_types_lock);
 
@@ -4166,6 +4151,7 @@ waitagain:
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
+	cpumask_clear(iter->started);
	iter->pos = -1;
 
	trace_event_read_lock();
@@ -4366,15 +4352,16 @@ static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
+	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;
 
	mutex_lock(&trace_types_lock);
 
-	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;
 
@@ -4401,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
		} else
			r = sprintf(buf, "X\n");
	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
 
	mutex_unlock(&trace_types_lock);
 
@@ -4413,7 +4400,8 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;
 
@@ -4427,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
	/* value is in KB */
	val <<= 10;
-
-	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;
 
@@ -4482,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 
	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
-		tracing_off();
+		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
@@ -4647,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
-	tracing_reset_online_cpus(&global_trace.trace_buffer);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-	tracing_reset_online_cpus(&global_trace.max_buffer);
+	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 
	mutex_unlock(&trace_types_lock);
@@ -4689,8 +4676,7 @@ struct ftrace_buffer_info {
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;
@@ -4699,26 +4685,29 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
		return -ENODEV;
 
	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(tr, tc, inode, file, true);
+		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
+		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
-			return -ENOMEM;
+			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
-			return -ENOMEM;
+			goto out;
		}
+		ret = 0;
+
		iter->tr = tr;
-		iter->trace_buffer = &tc->tr->max_buffer;
-		iter->cpu_file = tc->cpu;
+		iter->trace_buffer = &tr->max_buffer;
+		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
-
+out:
	if (ret < 0)
		trace_array_put(tr);
 
@@ -4873,11 +4862,11 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic_tc,
+	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
-	.release	= tracing_release_generic_tc,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_total_entries_fops = {
@@ -4929,8 +4918,7 @@ static const struct file_operations snapshot_raw_fops = {
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;
 
@@ -4948,10 +4936,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
	mutex_lock(&trace_types_lock);
 
-	tr->ref++;
-
	info->iter.tr		= tr;
-	info->iter.cpu_file	= tc->cpu;
+	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
@@ -5268,14 +5254,14 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;
-	int cpu = tc->cpu;
 
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
@@ -5328,9 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }
 
 static const struct file_operations tracing_stats_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5519,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
	return tr->percpu_dir;
 }
 
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+		      void *data, long cpu, const struct file_operations *fops)
+{
+	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+	if (ret) /* See tracing_get_cpu() */
+		ret->d_inode->i_cdev = (void *)(cpu + 1);
+	return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5538,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
	}
 
	/* per cpu trace_pipe */
-	trace_create_file("trace_pipe", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_pipe_fops);
+	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+				tr, cpu, &tracing_pipe_fops);
 
	/* per cpu trace */
-	trace_create_file("trace", 0644, d_cpu,
-			(void *)&data->trace_cpu, &tracing_fops);
+	trace_create_cpu_file("trace", 0644, d_cpu,
+				tr, cpu, &tracing_fops);
 
-	trace_create_file("trace_pipe_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_buffers_fops);
+	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+				tr, cpu, &tracing_buffers_fops);
 
-	trace_create_file("stats", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_stats_fops);
+	trace_create_cpu_file("stats", 0444, d_cpu,
+				tr, cpu, &tracing_stats_fops);
 
-	trace_create_file("buffer_size_kb", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_entries_fops);
+	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+				tr, cpu, &tracing_entries_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-	trace_create_file("snapshot", 0644, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_fops);
+	trace_create_cpu_file("snapshot", 0644, d_cpu,
+				tr, cpu, &snapshot_fops);
 
-	trace_create_file("snapshot_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_raw_fops);
+	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+				tr, cpu, &snapshot_raw_fops);
 #endif
 }
 
@@ -5868,17 +5865,6 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
 
-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
-	int cpu;
-
-	for_each_tracing_cpu(cpu) {
-		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
-	}
-}
-
 static int
 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
@@ -5896,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
		return -ENOMEM;
	}
 
-	init_trace_buffers(tr, buf);
-
	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5964,17 +5948,15 @@ static int new_instance_create(const char *name)
	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;
 
-	/* Holder for file callbacks */
-	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	tr->trace_cpu.tr = tr;
-
	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;
 
	ret = event_trace_add_tracer(tr->dir, tr);
-	if (ret)
+	if (ret) {
+		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
+	}
 
	init_tracer_debugfs(tr, tr->dir);
 
@@ -6120,13 +6102,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
			  tr, &tracing_iter_fops);
 
	trace_create_file("trace", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_fops);
+			  tr, &tracing_fops);
 
	trace_create_file("trace_pipe", 0444, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_pipe_fops);
+			  tr, &tracing_pipe_fops);
 
	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_entries_fops);
+			  tr, &tracing_entries_fops);
 
	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);
@@ -6141,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
			  &trace_clock_fops);
 
	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &snapshot_fops);
+			  tr, &snapshot_fops);
 #endif
 
	for_each_tracing_cpu(cpu)
@@ -6439,10 +6421,6 @@ __init static int tracer_alloc_buffers(void)
 
	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
 
-	/* Holder for file callbacks */
-	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	global_trace.trace_cpu.tr = &global_trace;
-
	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4a4f6e1828b6..fe39acd4c1aa 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
 
 struct trace_array;
 
-struct trace_cpu {
-	struct trace_array	*tr;
-	struct dentry		*dir;
-	int			cpu;
-};
-
 /*
  * The CPU trace array - it consists of thousands of trace entries
  * plus some other descriptor data: (for example which task started
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct trace_cpu	trace_cpu;
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */
 
@@ -196,7 +189,6 @@ struct trace_array {
	bool			allocated_snapshot;
 #endif
	int			buffer_disabled;
-	struct trace_cpu	trace_cpu;	/* place holder */
 #ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
@@ -214,7 +206,6 @@ struct trace_array {
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
-	struct task_struct	*waiter;
	int			ref;
 };
 
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
+/*
+ * Tracer data references selftest functions that only occur
+ * on boot up. These can be __init functions. Thus, when selftests
+ * are enabled, then the tracers need to reference __init functions.
+ */
+#define __tracer_data		__refdata
+#else
+/* Tracers are seldom changed. Optimize when selftests are disabled. */
+#define __tracer_data		__read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
@@ -1022,6 +1022,9 @@ extern struct list_head ftrace_events;
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
+extern const char *__start___tracepoint_str[];
+extern const char *__stop___tracepoint_str[];
+
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
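(Sketch, not part of the patch, of how a tracer would use the new
__tracer_data annotation; the "sample" names below are hypothetical.)

	static int sample_trace_init(struct trace_array *tr)
	{
		return 0;	/* may reference __init code under selftests */
	}

	static void sample_trace_reset(struct trace_array *tr)
	{
	}

	/* __refdata when selftests are on, __read_mostly otherwise */
	static struct tracer sample_tracer __tracer_data = {
		.name	= "sample",
		.init	= sample_trace_init,
		.reset	= sample_trace_reset,
	};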
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 84b1e045faba..80c36bcf66e8 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 
	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
+		return NULL;
+
	pc = preempt_count();
 
	*rctxp = perf_swevent_get_recursion_context();
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
	struct pt_regs regs;
	int rctx;
 
+	head = this_cpu_ptr(event_function.perf_events);
+	if (hlist_empty(head))
+		return;
+
 #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))
 
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
	entry->ip = ip;
	entry->parent_ip = parent_ip;
-
-	head = this_cpu_ptr(event_function.perf_events);
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);
 
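(Sketch, not part of the patch: with the WARN_ONCE guard above,
perf_trace_buf_prepare() can now return NULL, so callers must check the
result; "entry" and "rctx" below follow the usual caller pattern.)

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;	/* oversized request was refused */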
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7d854290bf81..29a7ebcfb426 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
	mutex_unlock(&event_mutex);
 }
 
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
-	int ret;
+	if (!dir)
+		return;
 
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
+	if (!--dir->nr_events) {
+		debugfs_remove_recursive(dir->entry);
+		list_del(&dir->list);
+		__put_system_dir(dir);
+	}
+}
 
-	ret = tracing_open_generic(inode, filp);
-	if (ret < 0)
-		trace_array_put(tr);
-	return ret;
+static void *event_file_data(struct file *filp)
+{
+	return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
+	struct dentry *dir = file->dir;
+	struct dentry *child;
 
-	trace_array_put(tr);
+	if (dir) {
+		spin_lock(&dir->d_lock);	/* probably unneeded */
+		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+			if (child->d_inode)	/* probably unneeded */
+				child->d_inode->i_private = NULL;
+		}
+		spin_unlock(&dir->d_lock);
 
-	return 0;
+		debugfs_remove_recursive(dir);
+	}
+
+	list_del(&file->list);
+	remove_subsystem(file->system);
+	kmem_cache_free(file_cachep, file);
 }
 
 /*
@@ -679,15 +688,25 @@ static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
+	unsigned long flags;
	char buf[4] = "0";
 
-	if (file->flags & FTRACE_EVENT_FL_ENABLED &&
-	    !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+	mutex_lock(&event_mutex);
+	file = event_file_data(filp);
+	if (likely(file))
+		flags = file->flags;
+	mutex_unlock(&event_mutex);
+
+	if (!file)
+		return -ENODEV;
+
+	if (flags & FTRACE_EVENT_FL_ENABLED &&
+	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		strcpy(buf, "1");
 
-	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
-	    file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+	    flags & FTRACE_EVENT_FL_SOFT_MODE)
		strcat(buf, "*");
 
	strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
	unsigned long val;
	int ret;
 
-	if (!file)
-		return -EINVAL;
-
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	switch (val) {
	case 0:
	case 1:
+		ret = -ENODEV;
		mutex_lock(&event_mutex);
-		ret = ftrace_event_enable_disable(file, val);
+		file = event_file_data(filp);
+		if (likely(file))
+			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;
 
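(Sketch, not part of the patch: the revalidation pattern that
event_file_data() enables, as used by event_enable_read()/write() above.
remove_event_file_dir() clears i_private, so the pointer is only trusted
while event_mutex is held; example_use() is a hypothetical caller.)

	static int example_use(struct file *filp)
	{
		struct ftrace_event_file *file;
		int ret = -ENODEV;

		mutex_lock(&event_mutex);
		file = event_file_data(filp);	/* NULL once the event is gone */
		if (file)
			ret = 0;	/* ... safe to use "file" here ... */
		mutex_unlock(&event_mutex);
		return ret;
	}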
@@ -825,65 +844,39 @@ enum {
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_event_call *call = m->private;
-	struct ftrace_event_field *field;
+	struct ftrace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
+	struct list_head *node = v;
 
	(*pos)++;
 
	switch ((unsigned long)v) {
	case FORMAT_HEADER:
-		if (unlikely(list_empty(common_head)))
-			return NULL;
-
-		field = list_entry(common_head->prev,
-				   struct ftrace_event_field, link);
-		return field;
+		node = common_head;
+		break;
 
	case FORMAT_FIELD_SEPERATOR:
-		if (unlikely(list_empty(head)))
-			return NULL;
-
-		field = list_entry(head->prev, struct ftrace_event_field, link);
-		return field;
+		node = head;
+		break;
 
	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}
 
-	field = v;
-	if (field->link.prev == common_head)
+	node = node->prev;
+	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
-	else if (field->link.prev == head)
+	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
-
-	field = list_entry(field->link.prev, struct ftrace_event_field, link);
-
-	return field;
-}
-
-static void *f_start(struct seq_file *m, loff_t *pos)
-{
-	loff_t l = 0;
-	void *p;
-
-	/* Start by showing the header */
-	if (!*pos)
-		return (void *)FORMAT_HEADER;
-
-	p = (void *)FORMAT_HEADER;
-	do {
-		p = f_next(m, p, &l);
-	} while (p && l < *pos);
-
-	return p;
+	else
+		return node;
 }
 
 static int f_show(struct seq_file *m, void *v)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;
 
@@ -904,8 +897,7 @@ static int f_show(struct seq_file *m, void *v)
904 return 0; 897 return 0;
905 } 898 }
906 899
907 field = v; 900 field = list_entry(v, struct ftrace_event_field, link);
908
909 /* 901 /*
910 * Smartly shows the array type (except dynamic array). 902
911 * Normal: 903 * Normal:
@@ -932,8 +924,25 @@ static int f_show(struct seq_file *m, void *v)
932 return 0; 924 return 0;
933} 925}
934 926
927static void *f_start(struct seq_file *m, loff_t *pos)
928{
929 void *p = (void *)FORMAT_HEADER;
930 loff_t l = 0;
931
932 /* ->stop() is called even if ->start() fails */
933 mutex_lock(&event_mutex);
934 if (!event_file_data(m->private))
935 return ERR_PTR(-ENODEV);
936
937 while (l < *pos && p)
938 p = f_next(m, p, &l);
939
940 return p;
941}
942
935static void f_stop(struct seq_file *m, void *p) 943static void f_stop(struct seq_file *m, void *p)
936{ 944{
945 mutex_unlock(&event_mutex);
937} 946}
938 947
939static const struct seq_operations trace_format_seq_ops = { 948static const struct seq_operations trace_format_seq_ops = {
@@ -945,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
945 954
946static int trace_format_open(struct inode *inode, struct file *file) 955static int trace_format_open(struct inode *inode, struct file *file)
947{ 956{
948 struct ftrace_event_call *call = inode->i_private;
949 struct seq_file *m; 957 struct seq_file *m;
950 int ret; 958 int ret;
951 959
@@ -954,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
954 return ret; 962 return ret;
955 963
956 m = file->private_data; 964 m = file->private_data;
957 m->private = call; 965 m->private = file;
958 966
959 return 0; 967 return 0;
960} 968}
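
The format-file hunks above replace the old "replay from FORMAT_HEADER" duplication with a single cursor that is either a sentinel value or a list node, and move the ->start()/->stop() pair under event_mutex so the event cannot vanish mid-dump. Below is a minimal, compilable userspace sketch of that iterator shape; it is simplified to one flat field array and forward iteration, and every name in it (f_start, f_next, the FORMAT_* sentinels) is a stand-in rather than the seq_file API itself.

#include <stdio.h>

#define FORMAT_HEADER    ((void *)1)
#define FORMAT_SEPARATOR ((void *)2)
#define FORMAT_PRINTFMT  ((void *)3)

static const char *fields[] = { "common_pid", "common_flags" };
static const int nfields = 2;

/* Cursor is a sentinel or a pointer into fields[]. */
static void *f_next(void *v, long *pos)
{
        (*pos)++;
        if (v == FORMAT_HEADER)
                return nfields ? (void *)&fields[0] : FORMAT_SEPARATOR;
        if (v == FORMAT_SEPARATOR)
                return FORMAT_PRINTFMT;
        if (v == FORMAT_PRINTFMT)
                return NULL;            /* all done */

        const char **f = v;             /* a real field: advance or move on */
        return (f + 1 < fields + nfields) ? (void *)(f + 1)
                                          : FORMAT_SEPARATOR;
}

/* start() replays next() from the header until it reaches *pos. */
static void *f_start(long *pos)
{
        void *p = FORMAT_HEADER;
        long l = 0;

        while (l < *pos && p)
                p = f_next(p, &l);
        return p;
}

int main(void)
{
        long pos = 0;

        for (void *p = f_start(&pos); p; p = f_next(p, &pos)) {
                if (p == FORMAT_HEADER)
                        puts("[header]");
                else if (p == FORMAT_SEPARATOR)
                        puts("[separator]");
                else if (p == FORMAT_PRINTFMT)
                        puts("[printfmt]");
                else
                        puts(*(const char **)p);
        }
        return 0;
}

Note that ->stop() runs even when ->start() fails, which is why the kernel version can take event_mutex in f_start() and drop it unconditionally in f_stop().
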
@@ -962,45 +970,47 @@ static int trace_format_open(struct inode *inode, struct file *file)
962static ssize_t 970static ssize_t
963event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 971event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
964{ 972{
965 struct ftrace_event_call *call = filp->private_data; 973 int id = (long)event_file_data(filp);
966 struct trace_seq *s; 974 char buf[32];
967 int r; 975 int len;
968 976
969 if (*ppos) 977 if (*ppos)
970 return 0; 978 return 0;
971 979
972 s = kmalloc(sizeof(*s), GFP_KERNEL); 980 if (unlikely(!id))
973 if (!s) 981 return -ENODEV;
974 return -ENOMEM;
975 982
976 trace_seq_init(s); 983 len = sprintf(buf, "%d\n", id);
977 trace_seq_printf(s, "%d\n", call->event.type);
978 984
979 r = simple_read_from_buffer(ubuf, cnt, ppos, 985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
980 s->buffer, s->len);
981 kfree(s);
982 return r;
983} 986}
984 987
985static ssize_t 988static ssize_t
986event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 989event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
987 loff_t *ppos) 990 loff_t *ppos)
988{ 991{
989 struct ftrace_event_call *call = filp->private_data; 992 struct ftrace_event_call *call;
990 struct trace_seq *s; 993 struct trace_seq *s;
991 int r; 994 int r = -ENODEV;
992 995
993 if (*ppos) 996 if (*ppos)
994 return 0; 997 return 0;
995 998
996 s = kmalloc(sizeof(*s), GFP_KERNEL); 999 s = kmalloc(sizeof(*s), GFP_KERNEL);
1000
997 if (!s) 1001 if (!s)
998 return -ENOMEM; 1002 return -ENOMEM;
999 1003
1000 trace_seq_init(s); 1004 trace_seq_init(s);
1001 1005
1002 print_event_filter(call, s); 1006 mutex_lock(&event_mutex);
1003 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1007 call = event_file_data(filp);
1008 if (call)
1009 print_event_filter(call, s);
1010 mutex_unlock(&event_mutex);
1011
1012 if (call)
1013 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1004 1014
1005 kfree(s); 1015 kfree(s);
1006 1016
@@ -1011,9 +1021,9 @@ static ssize_t
1011event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1021event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1012 loff_t *ppos) 1022 loff_t *ppos)
1013{ 1023{
1014 struct ftrace_event_call *call = filp->private_data; 1024 struct ftrace_event_call *call;
1015 char *buf; 1025 char *buf;
1016 int err; 1026 int err = -ENODEV;
1017 1027
1018 if (cnt >= PAGE_SIZE) 1028 if (cnt >= PAGE_SIZE)
1019 return -EINVAL; 1029 return -EINVAL;
@@ -1028,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1028 } 1038 }
1029 buf[cnt] = '\0'; 1039 buf[cnt] = '\0';
1030 1040
1031 err = apply_event_filter(call, buf); 1041 mutex_lock(&event_mutex);
1042 call = event_file_data(filp);
1043 if (call)
1044 err = apply_event_filter(call, buf);
1045 mutex_unlock(&event_mutex);
1046
1032 free_page((unsigned long) buf); 1047 free_page((unsigned long) buf);
1033 if (err < 0) 1048 if (err < 0)
1034 return err; 1049 return err;
@@ -1218,6 +1233,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1218 1233
1219static int ftrace_event_avail_open(struct inode *inode, struct file *file); 1234static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1220static int ftrace_event_set_open(struct inode *inode, struct file *file); 1235static int ftrace_event_set_open(struct inode *inode, struct file *file);
1236static int ftrace_event_release(struct inode *inode, struct file *file);
1221 1237
1222static const struct seq_operations show_event_seq_ops = { 1238static const struct seq_operations show_event_seq_ops = {
1223 .start = t_start, 1239 .start = t_start,
@@ -1245,14 +1261,13 @@ static const struct file_operations ftrace_set_event_fops = {
1245 .read = seq_read, 1261 .read = seq_read,
1246 .write = ftrace_event_write, 1262 .write = ftrace_event_write,
1247 .llseek = seq_lseek, 1263 .llseek = seq_lseek,
1248 .release = seq_release, 1264 .release = ftrace_event_release,
1249}; 1265};
1250 1266
1251static const struct file_operations ftrace_enable_fops = { 1267static const struct file_operations ftrace_enable_fops = {
1252 .open = tracing_open_generic_file, 1268 .open = tracing_open_generic,
1253 .read = event_enable_read, 1269 .read = event_enable_read,
1254 .write = event_enable_write, 1270 .write = event_enable_write,
1255 .release = tracing_release_generic_file,
1256 .llseek = default_llseek, 1271 .llseek = default_llseek,
1257}; 1272};
1258 1273
@@ -1264,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
1264}; 1279};
1265 1280
1266static const struct file_operations ftrace_event_id_fops = { 1281static const struct file_operations ftrace_event_id_fops = {
1267 .open = tracing_open_generic,
1268 .read = event_id_read, 1282 .read = event_id_read,
1269 .llseek = default_llseek, 1283 .llseek = default_llseek,
1270}; 1284};
@@ -1323,6 +1337,15 @@ ftrace_event_open(struct inode *inode, struct file *file,
1323 return ret; 1337 return ret;
1324} 1338}
1325 1339
1340static int ftrace_event_release(struct inode *inode, struct file *file)
1341{
1342 struct trace_array *tr = inode->i_private;
1343
1344 trace_array_put(tr);
1345
1346 return seq_release(inode, file);
1347}
1348
1326static int 1349static int
1327ftrace_event_avail_open(struct inode *inode, struct file *file) 1350ftrace_event_avail_open(struct inode *inode, struct file *file)
1328{ 1351{
@@ -1336,12 +1359,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
1336{ 1359{
1337 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 1360 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1338 struct trace_array *tr = inode->i_private; 1361 struct trace_array *tr = inode->i_private;
1362 int ret;
1363
1364 if (trace_array_get(tr) < 0)
1365 return -ENODEV;
1339 1366
1340 if ((file->f_mode & FMODE_WRITE) && 1367 if ((file->f_mode & FMODE_WRITE) &&
1341 (file->f_flags & O_TRUNC)) 1368 (file->f_flags & O_TRUNC))
1342 ftrace_clear_events(tr); 1369 ftrace_clear_events(tr);
1343 1370
1344 return ftrace_event_open(inode, file, seq_ops); 1371 ret = ftrace_event_open(inode, file, seq_ops);
1372 if (ret < 0)
1373 trace_array_put(tr);
1374 return ret;
1345} 1375}
1346 1376
1347static struct event_subsystem * 1377static struct event_subsystem *
@@ -1496,8 +1526,8 @@ event_create_dir(struct dentry *parent,
1496 1526
1497#ifdef CONFIG_PERF_EVENTS 1527#ifdef CONFIG_PERF_EVENTS
1498 if (call->event.type && call->class->reg) 1528 if (call->event.type && call->class->reg)
1499 trace_create_file("id", 0444, file->dir, call, 1529 trace_create_file("id", 0444, file->dir,
1500 id); 1530 (void *)(long)call->event.type, id);
1501#endif 1531#endif
1502 1532
1503 /* 1533 /*
@@ -1522,33 +1552,16 @@ event_create_dir(struct dentry *parent,
1522 return 0; 1552 return 0;
1523} 1553}
1524 1554
1525static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1526{
1527 if (!dir)
1528 return;
1529
1530 if (!--dir->nr_events) {
1531 debugfs_remove_recursive(dir->entry);
1532 list_del(&dir->list);
1533 __put_system_dir(dir);
1534 }
1535}
1536
1537static void remove_event_from_tracers(struct ftrace_event_call *call) 1555static void remove_event_from_tracers(struct ftrace_event_call *call)
1538{ 1556{
1539 struct ftrace_event_file *file; 1557 struct ftrace_event_file *file;
1540 struct trace_array *tr; 1558 struct trace_array *tr;
1541 1559
1542 do_for_each_event_file_safe(tr, file) { 1560 do_for_each_event_file_safe(tr, file) {
1543
1544 if (file->event_call != call) 1561 if (file->event_call != call)
1545 continue; 1562 continue;
1546 1563
1547 list_del(&file->list); 1564 remove_event_file_dir(file);
1548 debugfs_remove_recursive(file->dir);
1549 remove_subsystem(file->system);
1550 kmem_cache_free(file_cachep, file);
1551
1552 /* 1565 /*
1553 * The do_for_each_event_file_safe() is 1566 * The do_for_each_event_file_safe() is
1554 * a double loop. After finding the call for this 1567 * a double loop. After finding the call for this
@@ -1700,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
1700 destroy_preds(call); 1713 destroy_preds(call);
1701} 1714}
1702 1715
1716static int probe_remove_event_call(struct ftrace_event_call *call)
1717{
1718 struct trace_array *tr;
1719 struct ftrace_event_file *file;
1720
1721#ifdef CONFIG_PERF_EVENTS
1722 if (call->perf_refcount)
1723 return -EBUSY;
1724#endif
1725 do_for_each_event_file(tr, file) {
1726 if (file->event_call != call)
1727 continue;
1728 /*
1729 * We can't rely on the ftrace_event_enable_disable(enable => 0)
1730 * call we are about to make; FTRACE_EVENT_FL_SOFT_MODE can
1731 * suppress the TRACE_REG_UNREGISTER it would normally trigger.
1732 */
1733 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1734 return -EBUSY;
1735 /*
1736 * The do_for_each_event_file() is
1737 * a double loop. After finding the call for this
1738 * trace_array, we use break to jump to the next
1739 * trace_array.
1740 */
1741 break;
1742 } while_for_each_event_file();
1743
1744 __trace_remove_event_call(call);
1745
1746 return 0;
1747}
1748
1703/* Remove an event_call */ 1749/* Remove an event_call */
1704void trace_remove_event_call(struct ftrace_event_call *call) 1750int trace_remove_event_call(struct ftrace_event_call *call)
1705{ 1751{
1752 int ret;
1753
1706 mutex_lock(&trace_types_lock); 1754 mutex_lock(&trace_types_lock);
1707 mutex_lock(&event_mutex); 1755 mutex_lock(&event_mutex);
1708 down_write(&trace_event_sem); 1756 down_write(&trace_event_sem);
1709 __trace_remove_event_call(call); 1757 ret = probe_remove_event_call(call);
1710 up_write(&trace_event_sem); 1758 up_write(&trace_event_sem);
1711 mutex_unlock(&event_mutex); 1759 mutex_unlock(&event_mutex);
1712 mutex_unlock(&trace_types_lock); 1760 mutex_unlock(&trace_types_lock);
1761
1762 return ret;
1713} 1763}
1714 1764
1715#define for_each_event(event, start, end) \ 1765#define for_each_event(event, start, end) \
@@ -2278,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
2278{ 2328{
2279 struct ftrace_event_file *file, *next; 2329 struct ftrace_event_file *file, *next;
2280 2330
2281 list_for_each_entry_safe(file, next, &tr->events, list) { 2331 list_for_each_entry_safe(file, next, &tr->events, list)
2282 list_del(&file->list); 2332 remove_event_file_dir(file);
2283 debugfs_remove_recursive(file->dir);
2284 remove_subsystem(file->system);
2285 kmem_cache_free(file_cachep, file);
2286 }
2287} 2333}
2288 2334
2289static void 2335static void
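
One pattern repeats across the trace_events.c hunks above: an open file handle may outlive the event it points at, so every read/write re-looks the event up under event_mutex via event_file_data() and returns -ENODEV once it is gone, instead of trusting a pointer cached at open() time. Here is a compilable userspace sketch of that revalidate-under-lock shape; struct event_file and lookup_event_file() are hypothetical stand-ins, not the kernel types.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct event_file {
        unsigned int flags;
};

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct event_file *registered;   /* NULL once the event is removed */

/* Stand-in for event_file_data(filp): revalidate on every access. */
static struct event_file *lookup_event_file(void)
{
        return registered;
}

static int event_enable_read(char *buf)
{
        struct event_file *file;
        unsigned int flags = 0;

        /* Snapshot the state under the lock, then drop it quickly. */
        pthread_mutex_lock(&event_mutex);
        file = lookup_event_file();
        if (file)
                flags = file->flags;
        pthread_mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;         /* event was removed under us */

        buf[0] = (flags & 1) ? '1' : '0';
        buf[1] = '\0';
        return 0;
}

int main(void)
{
        struct event_file ev = { .flags = 1 };
        char buf[4];
        int ret;

        registered = &ev;
        ret = event_enable_read(buf);
        printf("read: %d buf=%s\n", ret, buf);

        registered = NULL;              /* simulate event removal */
        ret = event_enable_read(buf);
        printf("read after removal: %d\n", ret);
        return 0;
}

The snapshot-then-unlock in the read path mirrors event_enable_read() in the hunks above: the flags are copied inside the critical section, so no file member is dereferenced after the lock is dropped.
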
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0d883dc057d6..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
637 free_page((unsigned long) buf); 637 free_page((unsigned long) buf);
638} 638}
639 639
640/* caller must hold event_mutex */
640void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) 641void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
641{ 642{
642 struct event_filter *filter; 643 struct event_filter *filter = call->filter;
643 644
644 mutex_lock(&event_mutex);
645 filter = call->filter;
646 if (filter && filter->filter_string) 645 if (filter && filter->filter_string)
647 trace_seq_printf(s, "%s\n", filter->filter_string); 646 trace_seq_printf(s, "%s\n", filter->filter_string);
648 else 647 else
649 trace_seq_printf(s, "none\n"); 648 trace_seq_puts(s, "none\n");
650 mutex_unlock(&event_mutex);
651} 649}
652 650
653void print_subsystem_event_filter(struct event_subsystem *system, 651void print_subsystem_event_filter(struct event_subsystem *system,
@@ -660,7 +658,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
660 if (filter && filter->filter_string) 658 if (filter && filter->filter_string)
661 trace_seq_printf(s, "%s\n", filter->filter_string); 659 trace_seq_printf(s, "%s\n", filter->filter_string);
662 else 660 else
663 trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); 661 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
664 mutex_unlock(&event_mutex); 662 mutex_unlock(&event_mutex);
665} 663}
666 664
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
1841 return err; 1839 return err;
1842} 1840}
1843 1841
1842/* caller must hold event_mutex */
1844int apply_event_filter(struct ftrace_event_call *call, char *filter_string) 1843int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1845{ 1844{
1846 struct event_filter *filter; 1845 struct event_filter *filter;
1847 int err = 0; 1846 int err;
1848
1849 mutex_lock(&event_mutex);
1850 1847
1851 if (!strcmp(strstrip(filter_string), "0")) { 1848 if (!strcmp(strstrip(filter_string), "0")) {
1852 filter_disable(call); 1849 filter_disable(call);
1853 filter = call->filter; 1850 filter = call->filter;
1854 if (!filter) 1851 if (!filter)
1855 goto out_unlock; 1852 return 0;
1856 RCU_INIT_POINTER(call->filter, NULL); 1853 RCU_INIT_POINTER(call->filter, NULL);
1857 /* Make sure the filter is not being used */ 1854 /* Make sure the filter is not being used */
1858 synchronize_sched(); 1855 synchronize_sched();
1859 __free_filter(filter); 1856 __free_filter(filter);
1860 goto out_unlock; 1857 return 0;
1861 } 1858 }
1862 1859
1863 err = create_filter(call, filter_string, true, &filter); 1860 err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1884 __free_filter(tmp); 1881 __free_filter(tmp);
1885 } 1882 }
1886 } 1883 }
1887out_unlock:
1888 mutex_unlock(&event_mutex);
1889 1884
1890 return err; 1885 return err;
1891} 1886}
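
The filter hunks move event_mutex out of print_event_filter()/apply_event_filter() and into the callers, so that looking up the event and operating on its filter happen in one critical section; the functions themselves now just document "caller must hold event_mutex". A toy userspace analogue of keeping such a contract checkable follows (in the kernel this job falls to lockdep_assert_held(); every name below is invented for the sketch).

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static int event_mutex_held;    /* toy stand-in for lockdep state */

static void event_lock(void)
{
        pthread_mutex_lock(&event_mutex);
        event_mutex_held = 1;
}

static void event_unlock(void)
{
        event_mutex_held = 0;
        pthread_mutex_unlock(&event_mutex);
}

/* caller must hold event_mutex */
static void print_filter(const char *filter)
{
        assert(event_mutex_held);       /* catches callers that forgot */
        printf("%s\n", filter ? filter : "none");
}

int main(void)
{
        event_lock();
        print_filter("pid == 1");
        event_unlock();
        return 0;
}
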
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b863f93b30f3..38fe1483c508 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
199 return 0; 199 return 0;
200} 200}
201 201
202static struct tracer function_trace __read_mostly = 202static struct tracer function_trace __tracer_data =
203{ 203{
204 .name = "function", 204 .name = "function",
205 .init = function_trace_init, 205 .init = function_trace_init,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8388bc99f2ee..b5c09242683d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
446 446
447 /* First spaces to align center */ 447 /* First spaces to align center */
448 for (i = 0; i < spaces / 2; i++) { 448 for (i = 0; i < spaces / 2; i++) {
449 ret = trace_seq_printf(s, " "); 449 ret = trace_seq_putc(s, ' ');
450 if (!ret) 450 if (!ret)
451 return TRACE_TYPE_PARTIAL_LINE; 451 return TRACE_TYPE_PARTIAL_LINE;
452 } 452 }
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
457 457
458 /* Last spaces to align center */ 458 /* Last spaces to align center */
459 for (i = 0; i < spaces - (spaces / 2); i++) { 459 for (i = 0; i < spaces - (spaces / 2); i++) {
460 ret = trace_seq_printf(s, " "); 460 ret = trace_seq_putc(s, ' ');
461 if (!ret) 461 if (!ret)
462 return TRACE_TYPE_PARTIAL_LINE; 462 return TRACE_TYPE_PARTIAL_LINE;
463 } 463 }
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
503 ------------------------------------------ 503 ------------------------------------------
504 504
505 */ 505 */
506 ret = trace_seq_printf(s, 506 ret = trace_seq_puts(s,
507 " ------------------------------------------\n"); 507 " ------------------------------------------\n");
508 if (!ret) 508 if (!ret)
509 return TRACE_TYPE_PARTIAL_LINE; 509 return TRACE_TYPE_PARTIAL_LINE;
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
516 if (ret == TRACE_TYPE_PARTIAL_LINE) 516 if (ret == TRACE_TYPE_PARTIAL_LINE)
517 return TRACE_TYPE_PARTIAL_LINE; 517 return TRACE_TYPE_PARTIAL_LINE;
518 518
519 ret = trace_seq_printf(s, " => "); 519 ret = trace_seq_puts(s, " => ");
520 if (!ret) 520 if (!ret)
521 return TRACE_TYPE_PARTIAL_LINE; 521 return TRACE_TYPE_PARTIAL_LINE;
522 522
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
524 if (ret == TRACE_TYPE_PARTIAL_LINE) 524 if (ret == TRACE_TYPE_PARTIAL_LINE)
525 return TRACE_TYPE_PARTIAL_LINE; 525 return TRACE_TYPE_PARTIAL_LINE;
526 526
527 ret = trace_seq_printf(s, 527 ret = trace_seq_puts(s,
528 "\n ------------------------------------------\n\n"); 528 "\n ------------------------------------------\n\n");
529 if (!ret) 529 if (!ret)
530 return TRACE_TYPE_PARTIAL_LINE; 530 return TRACE_TYPE_PARTIAL_LINE;
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
645 ret = print_graph_proc(s, pid); 645 ret = print_graph_proc(s, pid);
646 if (ret == TRACE_TYPE_PARTIAL_LINE) 646 if (ret == TRACE_TYPE_PARTIAL_LINE)
647 return TRACE_TYPE_PARTIAL_LINE; 647 return TRACE_TYPE_PARTIAL_LINE;
648 ret = trace_seq_printf(s, " | "); 648 ret = trace_seq_puts(s, " | ");
649 if (!ret) 649 if (!ret)
650 return TRACE_TYPE_PARTIAL_LINE; 650 return TRACE_TYPE_PARTIAL_LINE;
651 } 651 }
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
657 return ret; 657 return ret;
658 658
659 if (type == TRACE_GRAPH_ENT) 659 if (type == TRACE_GRAPH_ENT)
660 ret = trace_seq_printf(s, "==========>"); 660 ret = trace_seq_puts(s, "==========>");
661 else 661 else
662 ret = trace_seq_printf(s, "<=========="); 662 ret = trace_seq_puts(s, "<==========");
663 663
664 if (!ret) 664 if (!ret)
665 return TRACE_TYPE_PARTIAL_LINE; 665 return TRACE_TYPE_PARTIAL_LINE;
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
668 if (ret != TRACE_TYPE_HANDLED) 668 if (ret != TRACE_TYPE_HANDLED)
669 return ret; 669 return ret;
670 670
671 ret = trace_seq_printf(s, "\n"); 671 ret = trace_seq_putc(s, '\n');
672 672
673 if (!ret) 673 if (!ret)
674 return TRACE_TYPE_PARTIAL_LINE; 674 return TRACE_TYPE_PARTIAL_LINE;
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
705 len += strlen(nsecs_str); 705 len += strlen(nsecs_str);
706 } 706 }
707 707
708 ret = trace_seq_printf(s, " us "); 708 ret = trace_seq_puts(s, " us ");
709 if (!ret) 709 if (!ret)
710 return TRACE_TYPE_PARTIAL_LINE; 710 return TRACE_TYPE_PARTIAL_LINE;
711 711
712 /* Print remaining spaces to fit the row's width */ 712 /* Print remaining spaces to fit the row's width */
713 for (i = len; i < 7; i++) { 713 for (i = len; i < 7; i++) {
714 ret = trace_seq_printf(s, " "); 714 ret = trace_seq_putc(s, ' ');
715 if (!ret) 715 if (!ret)
716 return TRACE_TYPE_PARTIAL_LINE; 716 return TRACE_TYPE_PARTIAL_LINE;
717 } 717 }
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
731 /* No real data, just filling the column with spaces */ 731
732 switch (duration) { 732 switch (duration) {
733 case DURATION_FILL_FULL: 733 case DURATION_FILL_FULL:
734 ret = trace_seq_printf(s, " | "); 734 ret = trace_seq_puts(s, " | ");
735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
736 case DURATION_FILL_START: 736 case DURATION_FILL_START:
737 ret = trace_seq_printf(s, " "); 737 ret = trace_seq_puts(s, " ");
738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
739 case DURATION_FILL_END: 739 case DURATION_FILL_END:
740 ret = trace_seq_printf(s, " |"); 740 ret = trace_seq_puts(s, " |");
741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
742 } 742 }
743 743
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
745 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { 745 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
746 /* Duration exceeded 100 msecs */ 746 /* Duration exceeded 100 msecs */
747 if (duration > 100000ULL) 747 if (duration > 100000ULL)
748 ret = trace_seq_printf(s, "! "); 748 ret = trace_seq_puts(s, "! ");
749 /* Duration exceeded 10 msecs */ 749 /* Duration exceeded 10 msecs */
750 else if (duration > 10000ULL) 750 else if (duration > 10000ULL)
751 ret = trace_seq_printf(s, "+ "); 751 ret = trace_seq_puts(s, "+ ");
752 } 752 }
753 753
754 /* 754 /*
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
757 * to fill out the space. 757 * to fill out the space.
758 */ 758 */
759 if (ret == -1) 759 if (ret == -1)
760 ret = trace_seq_printf(s, " "); 760 ret = trace_seq_puts(s, " ");
761 761
762 /* Catch any failure that happened above */ 762
763 if (!ret) 763 if (!ret)
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,
767 if (ret != TRACE_TYPE_HANDLED) 767 if (ret != TRACE_TYPE_HANDLED)
768 return ret; 768 return ret;
769 769
770 ret = trace_seq_printf(s, "| "); 770 ret = trace_seq_puts(s, "| ");
771 if (!ret) 771 if (!ret)
772 return TRACE_TYPE_PARTIAL_LINE; 772 return TRACE_TYPE_PARTIAL_LINE;
773 773
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
817 817
818 /* Function */ 818 /* Function */
819 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 819 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
820 ret = trace_seq_printf(s, " "); 820 ret = trace_seq_putc(s, ' ');
821 if (!ret) 821 if (!ret)
822 return TRACE_TYPE_PARTIAL_LINE; 822 return TRACE_TYPE_PARTIAL_LINE;
823 } 823 }
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
858 858
859 /* Function */ 859 /* Function */
860 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 860 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
861 ret = trace_seq_printf(s, " "); 861 ret = trace_seq_putc(s, ' ');
862 if (!ret) 862 if (!ret)
863 return TRACE_TYPE_PARTIAL_LINE; 863 return TRACE_TYPE_PARTIAL_LINE;
864 } 864 }
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
917 if (ret == TRACE_TYPE_PARTIAL_LINE) 917 if (ret == TRACE_TYPE_PARTIAL_LINE)
918 return TRACE_TYPE_PARTIAL_LINE; 918 return TRACE_TYPE_PARTIAL_LINE;
919 919
920 ret = trace_seq_printf(s, " | "); 920 ret = trace_seq_puts(s, " | ");
921 if (!ret) 921 if (!ret)
922 return TRACE_TYPE_PARTIAL_LINE; 922 return TRACE_TYPE_PARTIAL_LINE;
923 } 923 }
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1117 1117
1118 /* Closing brace */ 1118 /* Closing brace */
1119 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 1119 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1120 ret = trace_seq_printf(s, " "); 1120 ret = trace_seq_putc(s, ' ');
1121 if (!ret) 1121 if (!ret)
1122 return TRACE_TYPE_PARTIAL_LINE; 1122 return TRACE_TYPE_PARTIAL_LINE;
1123 } 1123 }
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1129 * belongs to, write out the function name. 1129 * belongs to, write out the function name.
1130 */ 1130 */
1131 if (func_match) { 1131 if (func_match) {
1132 ret = trace_seq_printf(s, "}\n"); 1132 ret = trace_seq_puts(s, "}\n");
1133 if (!ret) 1133 if (!ret)
1134 return TRACE_TYPE_PARTIAL_LINE; 1134 return TRACE_TYPE_PARTIAL_LINE;
1135 } else { 1135 } else {
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1179 /* Indentation */ 1179 /* Indentation */
1180 if (depth > 0) 1180 if (depth > 0)
1181 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { 1181 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1182 ret = trace_seq_printf(s, " "); 1182 ret = trace_seq_putc(s, ' ');
1183 if (!ret) 1183 if (!ret)
1184 return TRACE_TYPE_PARTIAL_LINE; 1184 return TRACE_TYPE_PARTIAL_LINE;
1185 } 1185 }
1186 1186
1187 /* The comment */ 1187 /* The comment */
1188 ret = trace_seq_printf(s, "/* "); 1188 ret = trace_seq_puts(s, "/* ");
1189 if (!ret) 1189 if (!ret)
1190 return TRACE_TYPE_PARTIAL_LINE; 1190 return TRACE_TYPE_PARTIAL_LINE;
1191 1191
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1216 s->len--; 1216 s->len--;
1217 } 1217 }
1218 1218
1219 ret = trace_seq_printf(s, " */\n"); 1219 ret = trace_seq_puts(s, " */\n");
1220 if (!ret) 1220 if (!ret)
1221 return TRACE_TYPE_PARTIAL_LINE; 1221 return TRACE_TYPE_PARTIAL_LINE;
1222 1222
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = {
1448 .funcs = &graph_functions 1448 .funcs = &graph_functions
1449}; 1449};
1450 1450
1451static struct tracer graph_trace __read_mostly = { 1451static struct tracer graph_trace __tracer_data = {
1452 .name = "function_graph", 1452 .name = "function_graph",
1453 .open = graph_trace_open, 1453 .open = graph_trace_open,
1454 .pipe_open = graph_trace_open, 1454 .pipe_open = graph_trace_open,
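
Most of the function-graph hunks are mechanical: trace_seq_printf() with a constant format string and no arguments becomes trace_seq_puts(), and a single character becomes trace_seq_putc(), since there is no format to parse. The sketch below shows what such bounded append helpers look like in plain userspace C; struct seq and its functions are illustrative stand-ins, not the kernel's trace_seq.

#include <stdio.h>
#include <string.h>

struct seq {
        char buf[64];
        size_t len;
};

/* Append a constant string; returns 0 if the buffer is full. */
static int seq_puts(struct seq *s, const char *str)
{
        size_t n = strlen(str);

        if (s->len + n >= sizeof(s->buf))
                return 0;               /* mirrors TRACE_TYPE_PARTIAL_LINE */
        memcpy(s->buf + s->len, str, n);
        s->len += n;
        s->buf[s->len] = '\0';
        return 1;
}

/* Append one character: no copy loop, no format parsing at all. */
static int seq_putc(struct seq *s, char c)
{
        if (s->len + 1 >= sizeof(s->buf))
                return 0;
        s->buf[s->len++] = c;
        s->buf[s->len] = '\0';
        return 1;
}

int main(void)
{
        struct seq s = { .len = 0 };

        seq_puts(&s, "==========>");
        seq_putc(&s, '\n');
        fputs(s.buf, stdout);
        return 0;
}

Returning 0 on overflow matches how the callers above map a failed append to TRACE_TYPE_PARTIAL_LINE.
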
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7ed6976493c8..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
95} 95}
96 96
97static int register_probe_event(struct trace_probe *tp); 97static int register_probe_event(struct trace_probe *tp);
98static void unregister_probe_event(struct trace_probe *tp); 98static int unregister_probe_event(struct trace_probe *tp);
99 99
100static DEFINE_MUTEX(probe_lock); 100static DEFINE_MUTEX(probe_lock);
101static LIST_HEAD(probe_list); 101static LIST_HEAD(probe_list);
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
243static int 243static int
244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) 244disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
245{ 245{
246 struct event_file_link *link = NULL;
247 int wait = 0;
246 int ret = 0; 248 int ret = 0;
247 249
248 if (file) { 250 if (file) {
249 struct event_file_link *link;
250
251 link = find_event_file_link(tp, file); 251 link = find_event_file_link(tp, file);
252 if (!link) { 252 if (!link) {
253 ret = -EINVAL; 253 ret = -EINVAL;
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
255 } 255 }
256 256
257 list_del_rcu(&link->list); 257 list_del_rcu(&link->list);
258 /* synchronize with kprobe_trace_func/kretprobe_trace_func */ 258 wait = 1;
259 synchronize_sched();
260 kfree(link);
261
262 if (!list_empty(&tp->files)) 259 if (!list_empty(&tp->files))
263 goto out; 260 goto out;
264 261
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
271 disable_kretprobe(&tp->rp); 268 disable_kretprobe(&tp->rp);
272 else 269 else
273 disable_kprobe(&tp->rp.kp); 270 disable_kprobe(&tp->rp.kp);
271 wait = 1;
274 } 272 }
275 out: 273 out:
274 if (wait) {
275 /*
276 * Synchronize with kprobe_trace_func/kretprobe_trace_func
277 * to ensure the probe is really disabled (all running handlers
278 * have finished). This is not only for the kfree() below: the
279 * caller, trace_remove_event_call(), relies on it when releasing
280 * event_call related objects, which are still accessed in
281 * kprobe_trace_func/kretprobe_trace_func.
282 */
283 synchronize_sched();
284 kfree(link); /* Ignored if link == NULL */
285 }
286
276 return ret; 287 return ret;
277} 288}
278 289
@@ -340,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
340 if (trace_probe_is_enabled(tp)) 351 if (trace_probe_is_enabled(tp))
341 return -EBUSY; 352 return -EBUSY;
342 353
354 /* Will fail if probe is being used by ftrace or perf */
355 if (unregister_probe_event(tp))
356 return -EBUSY;
357
343 __unregister_trace_probe(tp); 358 __unregister_trace_probe(tp);
344 list_del(&tp->list); 359 list_del(&tp->list);
345 unregister_probe_event(tp);
346 360
347 return 0; 361 return 0;
348} 362}
@@ -621,7 +635,9 @@ static int release_all_trace_probes(void)
621 /* TODO: Use batch unregistration */ 635 /* TODO: Use batch unregistration */
622 while (!list_empty(&probe_list)) { 636 while (!list_empty(&probe_list)) {
623 tp = list_entry(probe_list.next, struct trace_probe, list); 637 tp = list_entry(probe_list.next, struct trace_probe, list);
624 unregister_trace_probe(tp); 638 ret = unregister_trace_probe(tp);
639 if (ret)
640 goto end;
625 free_trace_probe(tp); 641 free_trace_probe(tp);
626 } 642 }
627 643
@@ -1087,9 +1103,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
1087 __size = sizeof(*entry) + tp->size + dsize; 1103 __size = sizeof(*entry) + tp->size + dsize;
1088 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1104 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1089 size -= sizeof(u32); 1105 size -= sizeof(u32);
1090 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1091 "profile buffer not large enough"))
1092 return;
1093 1106
1094 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1107 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1095 if (!entry) 1108 if (!entry)
@@ -1120,9 +1133,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1120 __size = sizeof(*entry) + tp->size + dsize; 1133 __size = sizeof(*entry) + tp->size + dsize;
1121 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1134 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1122 size -= sizeof(u32); 1135 size -= sizeof(u32);
1123 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1124 "profile buffer not large enough"))
1125 return;
1126 1136
1127 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1137 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1128 if (!entry) 1138 if (!entry)
@@ -1242,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
1242 return ret; 1252 return ret;
1243} 1253}
1244 1254
1245static void unregister_probe_event(struct trace_probe *tp) 1255static int unregister_probe_event(struct trace_probe *tp)
1246{ 1256{
1257 int ret;
1258
1247 /* tp->event is unregistered in trace_remove_event_call() */ 1259 /* tp->event is unregistered in trace_remove_event_call() */
1248 trace_remove_event_call(&tp->call); 1260 ret = trace_remove_event_call(&tp->call);
1249 kfree(tp->call.print_fmt); 1261 if (!ret)
1262 kfree(tp->call.print_fmt);
1263 return ret;
1250} 1264}
1251 1265
1252/* Make a debugfs interface for controlling probe points */ 1266/* Make a debugfs interface for controlling probe points */
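
The kprobe hunks turn unregister_probe_event() and its callers from void into int so that removal can fail with -EBUSY while perf or ftrace still uses the event, and release_all_trace_probes() now stops on that error instead of freeing a live probe. A small compilable sketch of that stop-on-busy loop, with invented stand-ins (struct probe, unregister_probe()) in place of the kernel objects:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct probe {
        int refcount;   /* e.g. perf users of the event */
};

static int unregister_probe(struct probe *p)
{
        if (p->refcount)
                return -EBUSY;  /* still in use: fail, don't free */
        return 0;
}

static int release_all(struct probe **list, int n)
{
        for (int i = 0; i < n; i++) {
                int ret = unregister_probe(list[i]);

                if (ret)
                        return ret;     /* stop: freeing now would be a
                                           use-after-free for the user */
                free(list[i]);
                list[i] = NULL;
        }
        return 0;
}

int main(void)
{
        struct probe *a = calloc(1, sizeof(*a));
        struct probe *b = calloc(1, sizeof(*b));
        struct probe *list[] = { a, b };

        b->refcount = 1;        /* simulate a perf user holding the event */
        printf("release_all: %d\n", release_all(list, 2));
        return 0;               /* b is deliberately left registered */
}
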
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index a5e8f4878bfa..b3dcfb2f0fef 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
90 if (drv) 90 if (drv)
91 ret += trace_seq_printf(s, " %s\n", drv->name); 91 ret += trace_seq_printf(s, " %s\n", drv->name);
92 else 92 else
93 ret += trace_seq_printf(s, " \n"); 93 ret += trace_seq_puts(s, " \n");
94 return ret; 94 return ret;
95} 95}
96 96
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter)
107 struct header_iter *hiter; 107 struct header_iter *hiter;
108 struct trace_seq *s = &iter->seq; 108 struct trace_seq *s = &iter->seq;
109 109
110 trace_seq_printf(s, "VERSION 20070824\n"); 110 trace_seq_puts(s, "VERSION 20070824\n");
111 111
112 hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); 112 hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
113 if (!hiter) 113 if (!hiter)
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
209 (rw->value >> 0) & 0xff, rw->pc, 0); 209 (rw->value >> 0) & 0xff, rw->pc, 0);
210 break; 210 break;
211 default: 211 default:
212 ret = trace_seq_printf(s, "rw what?\n"); 212 ret = trace_seq_puts(s, "rw what?\n");
213 break; 213 break;
214 } 214 }
215 if (ret) 215 if (ret)
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
245 secs, usec_rem, m->map_id, 0UL, 0); 245 secs, usec_rem, m->map_id, 0UL, 0);
246 break; 246 break;
247 default: 247 default:
248 ret = trace_seq_printf(s, "map what?\n"); 248 ret = trace_seq_puts(s, "map what?\n");
249 break; 249 break;
250 } 250 }
251 if (ret) 251 if (ret)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index bb922d9ee51b..34e7cbac0c9c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
78 78
79 trace_assign_type(field, entry); 79 trace_assign_type(field, entry);
80 80
81 ret = trace_seq_printf(s, "%s", field->buf); 81 ret = trace_seq_puts(s, field->buf);
82 if (!ret) 82 if (!ret)
83 return TRACE_TYPE_PARTIAL_LINE; 83 return TRACE_TYPE_PARTIAL_LINE;
84 84
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
558 if (ret) 558 if (ret)
559 ret = trace_seq_puts(s, "??"); 559 ret = trace_seq_puts(s, "??");
560 if (ret) 560 if (ret)
561 ret = trace_seq_puts(s, "\n"); 561 ret = trace_seq_putc(s, '\n');
562 continue; 562 continue;
563 } 563 }
564 if (!ret) 564 if (!ret)
565 break; 565 break;
566 if (ret) 566 if (ret)
567 ret = seq_print_user_ip(s, mm, ip, sym_flags); 567 ret = seq_print_user_ip(s, mm, ip, sym_flags);
568 ret = trace_seq_puts(s, "\n"); 568 ret = trace_seq_putc(s, '\n');
569 } 569 }
570 570
571 if (mm) 571 if (mm)
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
579 int ret; 579 int ret;
580 580
581 if (!ip) 581 if (!ip)
582 return trace_seq_printf(s, "0"); 582 return trace_seq_putc(s, '0');
583 583
584 if (sym_flags & TRACE_ITER_SYM_OFFSET) 584 if (sym_flags & TRACE_ITER_SYM_OFFSET)
585 ret = seq_print_sym_offset(s, "%s", ip); 585 ret = seq_print_sym_offset(s, "%s", ip);
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
964 goto partial; 964 goto partial;
965 965
966 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { 966 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
967 if (!trace_seq_printf(s, " <-")) 967 if (!trace_seq_puts(s, " <-"))
968 goto partial; 968 goto partial;
969 if (!seq_print_ip_sym(s, 969 if (!seq_print_ip_sym(s,
970 field->parent_ip, 970 field->parent_ip,
971 flags)) 971 flags))
972 goto partial; 972 goto partial;
973 } 973 }
974 if (!trace_seq_printf(s, "\n")) 974 if (!trace_seq_putc(s, '\n'))
975 goto partial; 975 goto partial;
976 976
977 return TRACE_TYPE_HANDLED; 977 return TRACE_TYPE_HANDLED;
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1210 1210
1211 if (!seq_print_ip_sym(s, *p, flags)) 1211 if (!seq_print_ip_sym(s, *p, flags))
1212 goto partial; 1212 goto partial;
1213 if (!trace_seq_puts(s, "\n")) 1213 if (!trace_seq_putc(s, '\n'))
1214 goto partial; 1214 goto partial;
1215 } 1215 }
1216 1216
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index a9077c1b4ad3..2900817ba65c 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -244,12 +244,31 @@ static const char **find_next(void *v, loff_t *pos)
244{ 244{
245 const char **fmt = v; 245 const char **fmt = v;
246 int start_index; 246 int start_index;
247 int last_index;
247 248
248 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; 249 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
249 250
250 if (*pos < start_index) 251 if (*pos < start_index)
251 return __start___trace_bprintk_fmt + *pos; 252 return __start___trace_bprintk_fmt + *pos;
252 253
254 /*
255 * The __tracepoint_str section is treated the same as the
256 * __trace_printk_fmt section. The difference is that the
257 * __trace_printk_fmt section should only be used by trace_printk()
258 * in a debugging environment: if anything exists in that section,
259 * the trace_printk() helper buffers are allocated, which would just
260 * waste space in a production environment.
261 *
262 * The __tracepoint_str sections, on the other hand, are used by
263 * tracepoints which need to map the string pointers they record
264 * to the ASCII text for userspace.
265 */
266 last_index = start_index;
267 start_index = __stop___tracepoint_str - __start___tracepoint_str;
268
269 if (*pos < last_index + start_index)
270 return __start___tracepoint_str + (*pos - last_index);
271
253 return find_next_mod_format(start_index, v, fmt, pos); 272 return find_next_mod_format(start_index, v, fmt, pos);
254} 273}
255 274
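
find_next() above grows a second range: positions first index the __trace_bprintk_fmt section, then continue into __tracepoint_str by subtracting the size of the first range from *pos. A runnable sketch of that offset mapping, using ordinary arrays in place of the linker sections:

#include <stdio.h>

static const char *bprintk_fmts[] = { "fmt A", "fmt B" };
static const char *tracepoint_strs[] = { "str X", "str Y", "str Z" };

#define LEN(a) (sizeof(a) / sizeof((a)[0]))

static const char *const *find_next(long pos)
{
        long first = LEN(bprintk_fmts);

        if (pos < first)
                return &bprintk_fmts[pos];
        if (pos < first + (long)LEN(tracepoint_strs))
                return &tracepoint_strs[pos - first];   /* offset into 2nd set */
        return NULL;    /* the kernel would fall through to module formats */
}

int main(void)
{
        for (long pos = 0; find_next(pos); pos++)
                printf("%ld: %s\n", pos, *find_next(pos));
        return 0;
}
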
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 322e16461072..8fd03657bc7d 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
175 entry = syscall_nr_to_meta(syscall); 175 entry = syscall_nr_to_meta(syscall);
176 176
177 if (!entry) { 177 if (!entry) {
178 trace_seq_printf(s, "\n"); 178 trace_seq_putc(s, '\n');
179 return TRACE_TYPE_HANDLED; 179 return TRACE_TYPE_HANDLED;
180 } 180 }
181 181
@@ -566,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
566 if (!sys_data) 566 if (!sys_data)
567 return; 567 return;
568 568
569 head = this_cpu_ptr(sys_data->enter_event->perf_events);
570 if (hlist_empty(head))
571 return;
572
569 /* get the size after alignment with the u32 buffer size field */ 573 /* get the size after alignment with the u32 buffer size field */
570 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); 574 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
571 size = ALIGN(size + sizeof(u32), sizeof(u64)); 575 size = ALIGN(size + sizeof(u32), sizeof(u64));
572 size -= sizeof(u32); 576 size -= sizeof(u32);
573 577
574 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
575 "perf buffer not large enough"))
576 return;
577
578 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, 578 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
579 sys_data->enter_event->event.type, regs, &rctx); 579 sys_data->enter_event->event.type, regs, &rctx);
580 if (!rec) 580 if (!rec)
@@ -583,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
583 rec->nr = syscall_nr; 583 rec->nr = syscall_nr;
584 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 584 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
585 (unsigned long *)&rec->args); 585 (unsigned long *)&rec->args);
586
587 head = this_cpu_ptr(sys_data->enter_event->perf_events);
588 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 586 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
589} 587}
590 588
@@ -642,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
642 if (!sys_data) 640 if (!sys_data)
643 return; 641 return;
644 642
643 head = this_cpu_ptr(sys_data->exit_event->perf_events);
644 if (hlist_empty(head))
645 return;
646
645 /* We can probably do that at build time */ 647 /* We can probably do that at build time */
646 size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); 648 size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
647 size -= sizeof(u32); 649 size -= sizeof(u32);
648 650
649 /*
650 * Impossible, but be paranoid with the future
651 * How to put this check outside runtime?
652 */
653 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
654 "exit event has grown above perf buffer size"))
655 return;
656
657 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, 651 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
658 sys_data->exit_event->event.type, regs, &rctx); 652 sys_data->exit_event->event.type, regs, &rctx);
659 if (!rec) 653 if (!rec)
@@ -661,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
661 655
662 rec->nr = syscall_nr; 656 rec->nr = syscall_nr;
663 rec->ret = syscall_get_return_value(current, regs); 657 rec->ret = syscall_get_return_value(current, regs);
664
665 head = this_cpu_ptr(sys_data->exit_event->perf_events);
666 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 658 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
667} 659}
668 660
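
Both syscall perf handlers are reordered to fetch the per-CPU consumer list first and bail out when it is empty, before any sizing or argument copying is done; the PERF_MAX_TRACE_SIZE WARN_ONCE checks are dropped along the way (presumably centralized in perf_trace_buf_prepare(), though that hunk is not shown here). A trivial sketch of the consumers-first shape, with made-up types standing in for the hlist and record machinery:

#include <stdio.h>

struct list { int nr; };

static int list_empty(const struct list *l)
{
        return l->nr == 0;
}

static void perf_syscall_enter(const struct list *per_cpu_events)
{
        if (list_empty(per_cpu_events))
                return;         /* nobody listening: skip all record setup */

        /* ... size the record, fill in the args, submit it ... */
        printf("record submitted\n");
}

int main(void)
{
        struct list none = { 0 }, some = { 1 };

        perf_syscall_enter(&none);
        perf_syscall_enter(&some);
        return 0;
}
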
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d5d0cd368a56..272261b5f94f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
70 (sizeof(struct probe_arg) * (n))) 70 (sizeof(struct probe_arg) * (n)))
71 71
72static int register_uprobe_event(struct trace_uprobe *tu); 72static int register_uprobe_event(struct trace_uprobe *tu);
73static void unregister_uprobe_event(struct trace_uprobe *tu); 73static int unregister_uprobe_event(struct trace_uprobe *tu);
74 74
75static DEFINE_MUTEX(uprobe_lock); 75static DEFINE_MUTEX(uprobe_lock);
76static LIST_HEAD(uprobe_list); 76static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
164} 164}
165 165
166/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ 166/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
167static void unregister_trace_uprobe(struct trace_uprobe *tu) 167static int unregister_trace_uprobe(struct trace_uprobe *tu)
168{ 168{
169 int ret;
170
171 ret = unregister_uprobe_event(tu);
172 if (ret)
173 return ret;
174
169 list_del(&tu->list); 175 list_del(&tu->list);
170 unregister_uprobe_event(tu);
171 free_trace_uprobe(tu); 176 free_trace_uprobe(tu);
177 return 0;
172} 178}
173 179
174/* Register a trace_uprobe and probe_event */ 180/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
181 187
182 /* register as an event */ 188 /* register as an event */
183 old_tp = find_probe_event(tu->call.name, tu->call.class->system); 189 old_tp = find_probe_event(tu->call.name, tu->call.class->system);
184 if (old_tp) 190 if (old_tp) {
185 /* delete old event */ 191 /* delete old event */
186 unregister_trace_uprobe(old_tp); 192 ret = unregister_trace_uprobe(old_tp);
193 if (ret)
194 goto end;
195 }
187 196
188 ret = register_uprobe_event(tu); 197 ret = register_uprobe_event(tu);
189 if (ret) { 198 if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
256 group = UPROBE_EVENT_SYSTEM; 265 group = UPROBE_EVENT_SYSTEM;
257 266
258 if (is_delete) { 267 if (is_delete) {
268 int ret;
269
259 if (!event) { 270 if (!event) {
260 pr_info("Delete command needs an event name.\n"); 271 pr_info("Delete command needs an event name.\n");
261 return -EINVAL; 272 return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
269 return -ENOENT; 280 return -ENOENT;
270 } 281 }
271 /* delete an event */ 282 /* delete an event */
272 unregister_trace_uprobe(tu); 283 ret = unregister_trace_uprobe(tu);
273 mutex_unlock(&uprobe_lock); 284 mutex_unlock(&uprobe_lock);
274 return 0; 285 return ret;
275 } 286 }
276 287
277 if (argc < 2) { 288 if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
408 return ret; 419 return ret;
409} 420}
410 421
411static void cleanup_all_probes(void) 422static int cleanup_all_probes(void)
412{ 423{
413 struct trace_uprobe *tu; 424 struct trace_uprobe *tu;
425 int ret = 0;
414 426
415 mutex_lock(&uprobe_lock); 427 mutex_lock(&uprobe_lock);
416 while (!list_empty(&uprobe_list)) { 428 while (!list_empty(&uprobe_list)) {
417 tu = list_entry(uprobe_list.next, struct trace_uprobe, list); 429 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
418 unregister_trace_uprobe(tu); 430 ret = unregister_trace_uprobe(tu);
431 if (ret)
432 break;
419 } 433 }
420 mutex_unlock(&uprobe_lock); 434 mutex_unlock(&uprobe_lock);
435 return ret;
421} 436}
422 437
423/* Probes listing interfaces */ 438/* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
462 477
463static int probes_open(struct inode *inode, struct file *file) 478static int probes_open(struct inode *inode, struct file *file)
464{ 479{
465 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 480 int ret;
466 cleanup_all_probes(); 481
482 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
483 ret = cleanup_all_probes();
484 if (ret)
485 return ret;
486 }
467 487
468 return seq_open(file, &probes_seq_op); 488 return seq_open(file, &probes_seq_op);
469} 489}
@@ -818,8 +838,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
818 838
819 size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 839 size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
820 size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); 840 size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
821 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
822 return;
823 841
824 preempt_disable(); 842 preempt_disable();
825 head = this_cpu_ptr(call->perf_events); 843 head = this_cpu_ptr(call->perf_events);
@@ -970,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
970 return ret; 988 return ret;
971} 989}
972 990
973static void unregister_uprobe_event(struct trace_uprobe *tu) 991static int unregister_uprobe_event(struct trace_uprobe *tu)
974{ 992{
993 int ret;
994
975 /* tu->event is unregistered in trace_remove_event_call() */ 995 /* tu->event is unregistered in trace_remove_event_call() */
976 trace_remove_event_call(&tu->call); 996 ret = trace_remove_event_call(&tu->call);
997 if (ret)
998 return ret;
977 kfree(tu->call.print_fmt); 999 kfree(tu->call.print_fmt);
978 tu->call.print_fmt = NULL; 1000 tu->call.print_fmt = NULL;
1001 return 0;
979} 1002}
980 1003
981/* Make a trace interface for controlling probe points */ 1004