path: root/kernel/trace
author    Ingo Molnar <mingo@elte.hu>    2011-07-21 03:32:40 -0400
committer Ingo Molnar <mingo@elte.hu>    2011-07-21 03:32:40 -0400
commit    40bcea7bbe8fe452a2d272e2ffd3dea281eec9ff (patch)
tree      aedb6d02e53e3cf84cc32fd81db84032cee205e1 /kernel/trace
parent    492f73a303b488ffd67097b2351d54aa6e6c7c73 (diff)
parent    14a8fd7ceea6915c613746203d6e9a2bf273f16c (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                  95
-rw-r--r--  kernel/trace/trace.c                   92
-rw-r--r--  kernel/trace/trace.h                   50
-rw-r--r--  kernel/trace/trace_entries.h            3
-rw-r--r--  kernel/trace/trace_functions_graph.c    2
-rw-r--r--  kernel/trace/trace_kprobe.c           318
-rw-r--r--  kernel/trace/trace_output.c            11
7 files changed, 419 insertions(+), 152 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a0e246e2cee3..c3e4575e7829 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
@@ -146,9 +147,11 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -208,7 +211,12 @@ static void update_ftrace_function(void)
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* do not update till all functions have been modified */
+	__ftrace_trace_function_delay = func;
+#else
 	__ftrace_trace_function = func;
+#endif
 	ftrace_trace_function = ftrace_test_stop_func;
 #endif
 }
@@ -1170,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 	return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
@@ -1181,9 +1195,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
 	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
+	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
 	 */
@@ -1203,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
+	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		goto out;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1224,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	return 0;
+	ret = 0;
+ out:
+	/*
+	 * Enable regardless of ret:
+	 *  On success, we enable the new hash.
+	 *  On failure, we re-enable the original hash.
+	 */
+	ftrace_hash_rec_enable(ops, enable);
+
+	return ret;
 }
 
 /*
@@ -1584,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
+	/*
+	 * Do not call function tracer while we update the code.
+	 * We are in stop machine, no worrying about races.
+	 */
+	function_trace_stop++;
+
 	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
 	else if (*command & FTRACE_DISABLE_CALLS)
@@ -1597,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
 	else if (*command & FTRACE_STOP_FUNC_RET)
 		ftrace_disable_ftrace_graph_caller();
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/*
+	 * For archs that call ftrace_test_stop_func(), we must
+	 * wait till after we update all the function callers
+	 * before we update the callback. This keeps different
+	 * ops that record different functions from corrupting
+	 * each other.
+	 */
+	__ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+	function_trace_stop--;
+
 	return 0;
 }
 
@@ -2865,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	ftrace_match_records(hash, buf, len);
 
 	mutex_lock(&ftrace_lock);
-	ret = ftrace_hash_move(orig_hash, hash);
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+	    && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
@@ -3048,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
 			orig_hash = &iter->ops->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		/*
-		 * Remove the current set, update the hash and add
-		 * them back.
-		 */
-		ftrace_hash_rec_disable(iter->ops, filter_hash);
-		ret = ftrace_hash_move(orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_hash_rec_enable(iter->ops, filter_hash);
-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-			    && ftrace_enabled)
-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-		}
+		ret = ftrace_hash_move(iter->ops, filter_hash,
+				       orig_hash, iter->hash);
+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+		    && ftrace_enabled)
+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 		mutex_unlock(&ftrace_lock);
 	}
 	free_ftrace_hash(iter->hash);
@@ -3338,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
 {
 	unsigned long *p;
 	unsigned long addr;
-	unsigned long flags;
+	unsigned long flags = 0; /* Shut up gcc */
 
 	mutex_lock(&ftrace_lock);
 	p = start;
@@ -3356,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
 	}
 
 	/*
-	 * Disable interrupts to prevent interrupts from executing
-	 * code that is being modified.
+	 * We only need to disable interrupts on start up
+	 * because we are modifying code that an interrupt
+	 * may execute, and the modification is not atomic.
+	 * But for modules, nothing runs the code we modify
+	 * until we are finished with it, and there's no
+	 * reason to cause large interrupt latencies while we do it.
 	 */
-	local_irq_save(flags);
+	if (!mod)
+		local_irq_save(flags);
 	ftrace_update_code(mod);
-	local_irq_restore(flags);
+	if (!mod)
+		local_irq_restore(flags);
 	mutex_unlock(&ftrace_lock);
 
 	return 0;
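The ftrace_hash_move() rework above follows a small, reusable shape: tear down per-record accounting for the currently installed filter hash, attempt the swap, then re-enable accounting unconditionally so a failed swap leaves the previous hash fully functional. Below is a minimal user-space sketch of that shape only; it is not kernel code and every name in it is invented for illustration.

#include <stdlib.h>
#include <string.h>

struct filter_set {
	unsigned long *items;
	int count;
};

/* stand-ins for the per-item bookkeeping (ftrace_hash_rec_disable/_enable) */
static void accounting_disable(struct filter_set *s) { (void)s; }
static void accounting_enable(struct filter_set *s)  { (void)s; }

/* Replace *dst with a copy of src; on failure the old set stays usable. */
static int filter_set_move(struct filter_set **dst, const struct filter_set *src)
{
	struct filter_set *new_set;
	int ret = -1;

	/* drop accounting for whatever is currently installed */
	accounting_disable(*dst);

	new_set = malloc(sizeof(*new_set));
	if (!new_set)
		goto out;
	new_set->items = malloc(src->count * sizeof(*new_set->items));
	if (!new_set->items) {
		free(new_set);
		goto out;
	}
	memcpy(new_set->items, src->items, src->count * sizeof(*new_set->items));
	new_set->count = src->count;

	free((*dst)->items);
	free(*dst);
	*dst = new_set;
	ret = 0;
out:
	/* on success this covers the new set, on failure the original one */
	accounting_enable(*dst);
	return ret;
}

The point mirrored from the hunk is that the enable call sits after the out: label, so both the success and the failure path leave the accounting consistent.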
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9c16123f6e2..e5df02c69b1d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1248,6 +1248,15 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
@@ -1256,25 +1265,77 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
+	int use_stack;
+	int size = FTRACE_STACK_ENTRIES;
+
+	trace.nr_entries	= 0;
+	trace.skip		= skip;
+
+	/*
+	 * Since events can happen in NMIs there's no safe way to
+	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+	 * or NMI comes in, it will just have to use the default
+	 * FTRACE_STACK_SIZE.
+	 */
+	preempt_disable_notrace();
+
+	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	/*
+	 * We don't need any atomic variables, just a barrier.
+	 * If an interrupt comes in, we don't care, because it would
+	 * have exited and put the counter back to what we want.
+	 * We just need a barrier to keep gcc from moving things
+	 * around.
+	 */
+	barrier();
+	if (use_stack == 1) {
+		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
+		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
+
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+
+		if (trace.nr_entries > size)
+			size = trace.nr_entries;
+	} else
+		/* From now on, use_stack is a boolean */
+		use_stack = 0;
+
+	size *= sizeof(unsigned long);
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry) + size, flags, pc);
 	if (!event)
-		return;
+		goto out;
 	entry = ring_buffer_event_data(event);
-	memset(&entry->caller, 0, sizeof(entry->caller));
 
-	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
-	trace.skip		= skip;
-	trace.entries		= entry->caller;
+	memset(&entry->caller, 0, size);
+
+	if (use_stack)
+		memcpy(&entry->caller, trace.entries,
+		       trace.nr_entries * sizeof(unsigned long));
+	else {
+		trace.max_entries	= FTRACE_STACK_ENTRIES;
+		trace.entries		= entry->caller;
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+	}
+
+	entry->size = trace.nr_entries;
 
-	if (regs)
-		save_stack_trace_regs(regs, &trace);
-	else
-		save_stack_trace(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+ out:
+	/* Again, don't let gcc optimize things here */
+	barrier();
+	__get_cpu_var(ftrace_stack_reserve)--;
+	preempt_enable_notrace();
+
 }
 
 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
@@ -1562,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 
 	ftrace_enable_cpu();
 
-	return event ? ring_buffer_event_data(event) : NULL;
+	if (event) {
+		iter->ent_size = ring_buffer_event_length(event);
+		return ring_buffer_event_data(event);
+	}
+	iter->ent_size = 0;
+	return NULL;
 }
 
 static struct trace_entry *
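The new __ftrace_trace_stack() claims a per-cpu scratch buffer with a plain counter plus a compiler barrier; a nested caller (an interrupt or NMI in the kernel case) sees the counter above one and falls back to a small local buffer. Here is a rough user-space analogue of that reservation trick, with invented names, a thread-local buffer standing in for the per-cpu one, and a dummy capture() standing in for save_stack_trace(); it is a sketch of the idea, not the kernel implementation.

#include <stdio.h>
#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

#define BIG_ENTRIES   1024
#define SMALL_ENTRIES 8

static __thread unsigned long big_buf[BIG_ENTRIES];
static __thread int big_buf_reserve;

/* dummy stand-in for a real stack walker; just fills a few slots */
static size_t capture(unsigned long *buf, size_t max)
{
	size_t n = max < 3 ? max : 3;
	for (size_t i = 0; i < n; i++)
		buf[i] = 0x1000 + i;
	return n;
}

static void record_backtrace(void)
{
	unsigned long small[SMALL_ENTRIES];
	unsigned long *buf;
	size_t max, n;

	/* claim the big buffer; a nested call sees reserve > 1 */
	int depth = ++big_buf_reserve;
	barrier();	/* keep the compiler from moving work across the claim */

	if (depth == 1) {
		buf = big_buf;
		max = BIG_ENTRIES;
	} else {
		buf = small;
		max = SMALL_ENTRIES;
	}

	n = capture(buf, max);
	for (size_t i = 0; i < n; i++)	/* consume before releasing the claim */
		printf(" => %#lx\n", buf[i]);

	barrier();
	big_buf_reserve--;
}

int main(void)
{
	record_backtrace();
	return 0;
}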
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 30a94c26dcb3..3f381d0b20a8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -278,6 +278,29 @@ struct tracer {
 };
 
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+/*
+ * Abuse of the trace_recursion.
+ * As we need a way to maintain state if we are tracing the function
+ * graph in irq because we want to trace a particular function that
+ * was called in irq context but we have irq tracing off. Since this
+ * can only be modified by current, we can reuse trace_recursion.
+ */
+#define TRACE_IRQ_BIT			(1<<13)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #define TRACE_PIPE_ALL_CPU	-1
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
@@ -516,8 +539,18 @@ static inline int ftrace_graph_addr(unsigned long addr)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
-		if (addr == ftrace_graph_funcs[i])
+		if (addr == ftrace_graph_funcs[i]) {
+			/*
+			 * If no irqs are to be traced, but a set_graph_function
+			 * is set, and called by an interrupt handler, we still
+			 * want to trace it.
+			 */
+			if (in_irq())
+				trace_recursion_set(TRACE_IRQ_BIT);
+			else
+				trace_recursion_clear(TRACE_IRQ_BIT);
 			return 1;
+		}
 	}
 
 	return 0;
@@ -795,19 +828,4 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
-/* Only current can touch trace_recursion */
-#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
-#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
-
-/* Ring buffer has the 10 LSB bits to count */
-#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
-
-/* for function tracing recursion */
-#define TRACE_INTERNAL_BIT		(1<<11)
-#define TRACE_GLOBAL_BIT		(1<<12)
-
-#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
-#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
-#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
-
 #endif /* _LINUX_KERNEL_TRACE_H */
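The trace_recursion macros moved above gain a TRACE_IRQ_BIT that is set in ftrace_graph_addr() and tested in ftrace_graph_ignore_irqs(). A stand-alone sketch of how such a per-task flag bit connects the two sites follows; it is plain user-space C, with a static struct standing in for current, in_irq(), and ftrace_graph_skip_irqs, all illustrative rather than the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

struct task { unsigned long trace_recursion; };
static struct task current_task;		/* stands in for current */

#define TRACE_IRQ_BIT			(1 << 13)
#define trace_recursion_set(bit)	(current_task.trace_recursion |= (bit))
#define trace_recursion_clear(bit)	(current_task.trace_recursion &= ~(bit))
#define trace_recursion_test(bit)	(current_task.trace_recursion & (bit))

static bool in_irq_now;			/* pretend in_irq() */
static bool skip_irqs = true;		/* pretend ftrace_graph_skip_irqs */

/* site 1: the filter match remembers whether it happened in irq context */
static void graph_addr_matched(void)
{
	if (in_irq_now)
		trace_recursion_set(TRACE_IRQ_BIT);
	else
		trace_recursion_clear(TRACE_IRQ_BIT);
}

/* site 2: irq filtering backs off when the matched function came from irq */
static bool ignore_irqs(void)
{
	if (!skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return false;
	return in_irq_now;
}

int main(void)
{
	in_irq_now = true;
	graph_addr_matched();
	printf("ignore irqs? %s\n", ignore_irqs() ? "yes" : "no");	/* no */
	return 0;
}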
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e32744c84d94..93365907f219 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 	TRACE_STACK,
 
 	F_STRUCT(
-		__array(	unsigned long,	caller, FTRACE_STACK_ENTRIES	)
+		__field(	int,		size	)
+		__dynamic_array(unsigned long,	caller	)
 	),
 
 	F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
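The layout change above turns a fixed-size caller[] array into a size field plus a variable-length tail, so readers walk the entry bounded by the recorded size rather than a compile-time constant. A small user-space sketch of that shape, using a C flexible array member with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stack_entry_sketch {
	int size;			/* number of valid entries in caller[] */
	unsigned long caller[];		/* flexible array member */
};

static struct stack_entry_sketch *entry_alloc(const unsigned long *calls, int n)
{
	struct stack_entry_sketch *e = malloc(sizeof(*e) + n * sizeof(e->caller[0]));
	if (!e)
		return NULL;
	e->size = n;
	memcpy(e->caller, calls, n * sizeof(e->caller[0]));
	return e;
}

int main(void)
{
	unsigned long calls[] = { 0xc0ffee, 0xbeef, 0xfeed };
	struct stack_entry_sketch *e = entry_alloc(calls, 3);
	if (!e)
		return 1;
	for (int i = 0; i < e->size; i++)	/* bounded by size, not a fixed max */
		printf(" => %#lx\n", e->caller[i]);
	free(e);
	return 0;
}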
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e8d6bb55d719..a7d2a4c653d8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -227,7 +227,7 @@ int __trace_graph_entry(struct trace_array *tr,
 
 static inline int ftrace_graph_ignore_irqs(void)
 {
-	if (!ftrace_graph_skip_irqs)
+	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 		return 0;
 
 	return in_irq();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 7db7b68c6c37..5fb3697bf0e5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref)
 DEFINE_FETCH_deref(string)
 DEFINE_FETCH_deref(string_size)
 
+static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
+{
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
 static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
 {
 	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
@@ -377,6 +385,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
 #define fetch_bitfield_string_size NULL
 
 static __kprobes void
+update_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+	/*
+	 * Don't check the bitfield itself, because this must be the
+	 * last fetch function.
+	 */
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
+static __kprobes void
 free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
 	/*
@@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 		free_symbol_cache(data->orig.data);
 	kfree(data);
 }
+
 /* Default (unsigned long) fetch type */
 #define __DEFAULT_FETCH_TYPE(t) u##t
 #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -536,6 +558,7 @@ struct probe_arg {
 /* Flags for trace_probe */
 #define TP_FLAG_TRACE	1
 #define TP_FLAG_PROFILE	2
+#define TP_FLAG_REGISTERED	4
 
 struct trace_probe {
 	struct list_head	list;
@@ -555,16 +578,49 @@ struct trace_probe {
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int probe_is_return(struct trace_probe *tp)
+static __kprobes int trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
 
-static __kprobes const char *probe_symbol(struct trace_probe *tp)
+static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
 {
 	return tp->symbol ? tp->symbol : "unknown";
 }
 
+static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
+{
+	return tp->rp.kp.offset;
+}
+
+static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
+{
+	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
+}
+
+static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
+{
+	return !!(tp->flags & TP_FLAG_REGISTERED);
+}
+
+static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
+{
+	return !!(kprobe_gone(&tp->rp.kp));
+}
+
+static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
+						struct module *mod)
+{
+	int len = strlen(mod->name);
+	const char *name = trace_probe_symbol(tp);
+	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+}
+
+static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
+{
+	return !!strchr(trace_probe_symbol(tp), ':');
+}
+
 static int register_probe_event(struct trace_probe *tp);
 static void unregister_probe_event(struct trace_probe *tp);
 
@@ -646,6 +702,16 @@ error:
 	return ERR_PTR(ret);
 }
 
+static void update_probe_arg(struct probe_arg *arg)
+{
+	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+		update_bitfield_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+		update_deref_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+		update_symbol_cache(arg->fetch.data);
+}
+
 static void free_probe_arg(struct probe_arg *arg)
 {
 	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
@@ -671,7 +737,7 @@ static void free_trace_probe(struct trace_probe *tp)
 	kfree(tp);
 }
 
-static struct trace_probe *find_probe_event(const char *event,
+static struct trace_probe *find_trace_probe(const char *event,
 					    const char *group)
 {
 	struct trace_probe *tp;
@@ -683,13 +749,96 @@ static struct trace_probe *find_probe_event(const char *event,
 	return NULL;
 }
 
+/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static int enable_trace_probe(struct trace_probe *tp, int flag)
+{
+	int ret = 0;
+
+	tp->flags |= flag;
+	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
+	    !trace_probe_has_gone(tp)) {
+		if (trace_probe_is_return(tp))
+			ret = enable_kretprobe(&tp->rp);
+		else
+			ret = enable_kprobe(&tp->rp.kp);
+	}
+
+	return ret;
+}
+
+/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static void disable_trace_probe(struct trace_probe *tp, int flag)
+{
+	tp->flags &= ~flag;
+	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
+		if (trace_probe_is_return(tp))
+			disable_kretprobe(&tp->rp);
+		else
+			disable_kprobe(&tp->rp.kp);
+	}
+}
+
+/* Internal register function - just handle k*probes and flags */
+static int __register_trace_probe(struct trace_probe *tp)
+{
+	int i, ret;
+
+	if (trace_probe_is_registered(tp))
+		return -EINVAL;
+
+	for (i = 0; i < tp->nr_args; i++)
+		update_probe_arg(&tp->args[i]);
+
+	/* Set/clear disabled flag according to tp->flag */
+	if (trace_probe_is_enabled(tp))
+		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
+	else
+		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
+
+	if (trace_probe_is_return(tp))
+		ret = register_kretprobe(&tp->rp);
+	else
+		ret = register_kprobe(&tp->rp.kp);
+
+	if (ret == 0)
+		tp->flags |= TP_FLAG_REGISTERED;
+	else {
+		pr_warning("Could not insert probe at %s+%lu: %d\n",
+			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
+		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
+			pr_warning("This probe might be able to register after"
+				   "target module is loaded. Continue.\n");
+			ret = 0;
+		} else if (ret == -EILSEQ) {
+			pr_warning("Probing address(0x%p) is not an "
+				   "instruction boundary.\n",
+				   tp->rp.kp.addr);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/* Internal unregister function - just handle k*probes and flags */
+static void __unregister_trace_probe(struct trace_probe *tp)
+{
+	if (trace_probe_is_registered(tp)) {
+		if (trace_probe_is_return(tp))
+			unregister_kretprobe(&tp->rp);
+		else
+			unregister_kprobe(&tp->rp.kp);
+		tp->flags &= ~TP_FLAG_REGISTERED;
+		/* Cleanup kprobe for reuse */
+		if (tp->rp.kp.symbol_name)
+			tp->rp.kp.addr = NULL;
+	}
+}
+
 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
 static void unregister_trace_probe(struct trace_probe *tp)
 {
-	if (probe_is_return(tp))
-		unregister_kretprobe(&tp->rp);
-	else
-		unregister_kprobe(&tp->rp.kp);
+	__unregister_trace_probe(tp);
 	list_del(&tp->list);
 	unregister_probe_event(tp);
 }
@@ -702,41 +851,65 @@ static int register_trace_probe(struct trace_probe *tp)
 
 	mutex_lock(&probe_lock);
 
-	/* register as an event */
-	old_tp = find_probe_event(tp->call.name, tp->call.class->system);
+	/* Delete old (same name) event if exist */
+	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
 	if (old_tp) {
-		/* delete old event */
 		unregister_trace_probe(old_tp);
 		free_trace_probe(old_tp);
 	}
+
+	/* Register new event */
 	ret = register_probe_event(tp);
 	if (ret) {
 		pr_warning("Failed to register probe event(%d)\n", ret);
 		goto end;
 	}
 
-	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
-	if (probe_is_return(tp))
-		ret = register_kretprobe(&tp->rp);
-	else
-		ret = register_kprobe(&tp->rp.kp);
-
-	if (ret) {
-		pr_warning("Could not insert probe(%d)\n", ret);
-		if (ret == -EILSEQ) {
-			pr_warning("Probing address(0x%p) is not an "
-				   "instruction boundary.\n",
-				   tp->rp.kp.addr);
-			ret = -EINVAL;
-		}
+	/* Register k*probe */
+	ret = __register_trace_probe(tp);
+	if (ret < 0)
 		unregister_probe_event(tp);
-	} else
+	else
 		list_add_tail(&tp->list, &probe_list);
+
 end:
 	mutex_unlock(&probe_lock);
 	return ret;
 }
 
+/* Module notifier call back, checking event on the module */
+static int trace_probe_module_callback(struct notifier_block *nb,
+				       unsigned long val, void *data)
+{
+	struct module *mod = data;
+	struct trace_probe *tp;
+	int ret;
+
+	if (val != MODULE_STATE_COMING)
+		return NOTIFY_DONE;
+
+	/* Update probes on coming module */
+	mutex_lock(&probe_lock);
+	list_for_each_entry(tp, &probe_list, list) {
+		if (trace_probe_within_module(tp, mod)) {
+			__unregister_trace_probe(tp);
+			ret = __register_trace_probe(tp);
+			if (ret)
+				pr_warning("Failed to re-register probe %s on"
+					   "%s: %d\n",
+					   tp->call.name, mod->name, ret);
+		}
+	}
+	mutex_unlock(&probe_lock);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block trace_probe_module_nb = {
+	.notifier_call = trace_probe_module_callback,
+	.priority = 1	/* Invoked after kprobe module callback */
+};
+
 /* Split symbol and offset. */
 static int split_symbol_offset(char *symbol, unsigned long *offset)
 {
@@ -962,8 +1135,8 @@ static int create_trace_probe(int argc, char **argv)
 {
 	/*
 	 * Argument syntax:
-	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
-	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
+	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 	 * Fetch args:
 	 *  $retval	: fetch return value
 	 *  $stack	: fetch stack address
@@ -1025,7 +1198,7 @@ static int create_trace_probe(int argc, char **argv)
 		return -EINVAL;
 	}
 	mutex_lock(&probe_lock);
-	tp = find_probe_event(event, group);
+	tp = find_trace_probe(event, group);
 	if (!tp) {
 		mutex_unlock(&probe_lock);
 		pr_info("Event %s/%s doesn't exist.\n", group, event);
@@ -1144,7 +1317,7 @@ error:
 	return ret;
 }
 
-static void cleanup_all_probes(void)
+static void release_all_trace_probes(void)
 {
 	struct trace_probe *tp;
 
@@ -1158,7 +1331,6 @@ static void cleanup_all_probes(void)
 	mutex_unlock(&probe_lock);
 }
 
-
 /* Probes listing interfaces */
 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 {
@@ -1181,15 +1353,16 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	struct trace_probe *tp = v;
 	int i;
 
-	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
+	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
 	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
 
 	if (!tp->symbol)
 		seq_printf(m, " 0x%p", tp->rp.kp.addr);
 	else if (tp->rp.kp.offset)
-		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
+		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
+			   tp->rp.kp.offset);
 	else
-		seq_printf(m, " %s", probe_symbol(tp));
+		seq_printf(m, " %s", trace_probe_symbol(tp));
 
 	for (i = 0; i < tp->nr_args; i++)
 		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
@@ -1209,7 +1382,7 @@ static int probes_open(struct inode *inode, struct file *file)
 {
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
-		cleanup_all_probes();
+		release_all_trace_probes();
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -1513,30 +1686,6 @@ partial:
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static int probe_event_enable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags |= TP_FLAG_TRACE;
-	if (probe_is_return(tp))
-		return enable_kretprobe(&tp->rp);
-	else
-		return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_event_disable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags &= ~TP_FLAG_TRACE;
-	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
-		if (probe_is_return(tp))
-			disable_kretprobe(&tp->rp);
-		else
-			disable_kprobe(&tp->rp.kp);
-	}
-}
-
 #undef DEFINE_FIELD
 #define DEFINE_FIELD(type, item, name, is_signed)			\
 	do {								\
@@ -1598,7 +1747,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
 
 	const char *fmt, *arg;
 
-	if (!probe_is_return(tp)) {
+	if (!trace_probe_is_return(tp)) {
 		fmt = "(%lx)";
 		arg = "REC->" FIELD_STRING_IP;
 	} else {
@@ -1715,49 +1864,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	head = this_cpu_ptr(call->perf_events);
 	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
 }
-
-static int probe_perf_enable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags |= TP_FLAG_PROFILE;
-
-	if (probe_is_return(tp))
-		return enable_kretprobe(&tp->rp);
-	else
-		return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_perf_disable(struct ftrace_event_call *call)
-{
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	tp->flags &= ~TP_FLAG_PROFILE;
-
-	if (!(tp->flags & TP_FLAG_TRACE)) {
-		if (probe_is_return(tp))
-			disable_kretprobe(&tp->rp);
-		else
-			disable_kprobe(&tp->rp.kp);
-	}
-}
 #endif	/* CONFIG_PERF_EVENTS */
 
 static __kprobes
 int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
 {
+	struct trace_probe *tp = (struct trace_probe *)event->data;
+
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return probe_event_enable(event);
+		return enable_trace_probe(tp, TP_FLAG_TRACE);
 	case TRACE_REG_UNREGISTER:
-		probe_event_disable(event);
+		disable_trace_probe(tp, TP_FLAG_TRACE);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return probe_perf_enable(event);
+		return enable_trace_probe(tp, TP_FLAG_PROFILE);
 	case TRACE_REG_PERF_UNREGISTER:
-		probe_perf_disable(event);
+		disable_trace_probe(tp, TP_FLAG_PROFILE);
 		return 0;
 #endif
 	}
@@ -1807,7 +1932,7 @@ static int register_probe_event(struct trace_probe *tp)
 
 	/* Initialize ftrace_event_call */
 	INIT_LIST_HEAD(&call->class->fields);
-	if (probe_is_return(tp)) {
+	if (trace_probe_is_return(tp)) {
 		call->event.funcs = &kretprobe_funcs;
 		call->class->define_fields = kretprobe_event_define_fields;
 	} else {
@@ -1846,6 +1971,9 @@ static __init int init_kprobe_trace(void)
 	struct dentry *d_tracer;
 	struct dentry *entry;
 
+	if (register_module_notifier(&trace_probe_module_nb))
+		return -EINVAL;
+
 	d_tracer = tracing_init_dentry();
 	if (!d_tracer)
 		return 0;
@@ -1899,12 +2027,12 @@ static __init int kprobe_trace_self_tests_init(void)
 		warn++;
 	} else {
 		/* Enable trace point */
-		tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
 			pr_warning("error on getting new probe.\n");
 			warn++;
 		} else
-			probe_event_enable(&tp->call);
+			enable_trace_probe(tp, TP_FLAG_TRACE);
 	}
 
 	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
@@ -1914,12 +2042,12 @@ static __init int kprobe_trace_self_tests_init(void)
 		warn++;
 	} else {
 		/* Enable trace point */
-		tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
 			pr_warning("error on getting new probe.\n");
 			warn++;
 		} else
-			probe_event_enable(&tp->call);
+			enable_trace_probe(tp, TP_FLAG_TRACE);
 	}
 
 	if (warn)
@@ -1940,7 +2068,7 @@ static __init int kprobe_trace_self_tests_init(void)
 	}
 
 end:
-	cleanup_all_probes();
+	release_all_trace_probes();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
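The new enable_trace_probe()/disable_trace_probe() pair lets two independent users (ftrace and perf) share one underlying k*probe: each user owns a flag bit, and the probe is armed only while at least one flag is set and the probe is registered. A user-space sketch of that bookkeeping, with invented names standing in for the kernel flags and for enable_kprobe()/disable_kprobe():

#include <stdbool.h>
#include <stdio.h>

#define FLAG_TRACE      1
#define FLAG_PROFILE    2
#define FLAG_REGISTERED 4

struct probe { unsigned int flags; bool armed; };

static bool probe_is_enabled(struct probe *p)
{
	return p->flags & (FLAG_TRACE | FLAG_PROFILE);
}

static void probe_enable(struct probe *p, unsigned int flag)
{
	p->flags |= flag;
	if (probe_is_enabled(p) && (p->flags & FLAG_REGISTERED))
		p->armed = true;		/* arm the underlying probe */
}

static void probe_disable(struct probe *p, unsigned int flag)
{
	p->flags &= ~flag;
	if (!probe_is_enabled(p) && (p->flags & FLAG_REGISTERED))
		p->armed = false;		/* last user gone: disarm */
}

int main(void)
{
	struct probe p = { .flags = FLAG_REGISTERED };

	probe_enable(&p, FLAG_TRACE);
	probe_enable(&p, FLAG_PROFILE);
	probe_disable(&p, FLAG_TRACE);
	printf("armed: %d\n", p.armed);	/* still armed: the perf user remains */
	probe_disable(&p, FLAG_PROFILE);
	printf("armed: %d\n", p.armed);	/* now disarmed */
	return 0;
}

The same flag check is what lets the new module notifier leave a probe registered-but-disabled until its target module loads.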
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e37de492a9e1..51999309a6cf 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 {
 	struct stack_entry *field;
 	struct trace_seq *s = &iter->seq;
-	int i;
+	unsigned long *p;
+	unsigned long *end;
 
 	trace_assign_type(field, iter->ent);
+	end = (unsigned long *)((long)iter->ent + iter->ent_size);
 
 	if (!trace_seq_puts(s, "<stack trace>\n"))
 		goto partial;
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
-			break;
+
+	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
 		if (!trace_seq_puts(s, " => "))
 			goto partial;
 
-		if (!seq_print_ip_sym(s, field->caller[i], flags))
+		if (!seq_print_ip_sym(s, *p, flags))
 			goto partial;
 		if (!trace_seq_puts(s, "\n"))
 			goto partial;