Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--   kernel/trace/trace_syscalls.c   61
1 file changed, 47 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7609dd6714c2..7a809e321058 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,6 @@
 #include <trace/syscall.h>
 #include <trace/events/syscalls.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
@@ -47,6 +48,38 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 }
 #endif
 
+#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+/*
+ * Some architectures that allow for 32bit applications
+ * to run on a 64bit kernel, do not map the syscalls for
+ * the 32bit tasks the same as they do for 64bit tasks.
+ *
+ *     *cough*x86*cough*
+ *
+ * In such a case, instead of reporting the wrong syscalls,
+ * simply ignore them.
+ *
+ * For an arch to ignore the compat syscalls it needs to
+ * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
+ * define the function arch_trace_is_compat_syscall() to let
+ * the tracing system know that it should ignore it.
+ */
+static int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	if (unlikely(arch_trace_is_compat_syscall(regs)))
+		return -1;
+
+	return syscall_get_nr(task, regs);
+}
+#else
+static inline int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	return syscall_get_nr(task, regs);
+}
+#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
+
 static __init struct syscall_metadata *
 find_syscall_meta(unsigned long syscall)
 {
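The arch side of this interface is not part of this file. As the new comment above explains, an architecture opts in by defining ARCH_TRACE_IGNORE_COMPAT_SYSCALLS and supplying arch_trace_is_compat_syscall(). The snippet below is only a minimal sketch of what a 64-bit architecture might put in its own headers (e.g. asm/ftrace.h); the CONFIG_COMPAT gate and the use of is_compat_task() are assumptions for illustration, not taken from this patch.

/*
 * Sketch only -- not part of this patch.  Assumes the arch can tell a
 * 32bit task apart via is_compat_task() from <linux/compat.h>.
 */
#include <linux/compat.h>
#include <linux/ptrace.h>

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_COMPAT)
# define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1

static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	/* Return true for a 32bit task so trace_get_syscall_nr() ignores it. */
	return is_compat_task();
}
#endif

With a definition along these lines in place, the compat branch of trace_get_syscall_nr() returns -1, and every caller changed below already bails out on a negative syscall number.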
@@ -77,7 +110,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
 	return syscalls_metadata[nr];
 }
 
-enum print_line_t
+static enum print_line_t
 print_syscall_enter(struct trace_iterator *iter, int flags,
 		    struct trace_event *event)
 {
@@ -130,7 +163,7 @@ end:
 	return TRACE_TYPE_HANDLED;
 }
 
-enum print_line_t
+static enum print_line_t
 print_syscall_exit(struct trace_iterator *iter, int flags,
 		   struct trace_event *event)
 {
@@ -270,16 +303,16 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call)
 	return ret;
 }
 
-void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 {
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	int size;
 	int syscall_nr;
+	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_enter_syscalls))
@@ -305,7 +338,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
-void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 {
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
@@ -313,7 +346,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	struct ring_buffer *buffer;
 	int syscall_nr;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_exit_syscalls))
@@ -337,7 +370,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
-int reg_event_syscall_enter(struct ftrace_event_call *call)
+static int reg_event_syscall_enter(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -356,7 +389,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
 	return ret;
 }
 
-void unreg_event_syscall_enter(struct ftrace_event_call *call)
+static void unreg_event_syscall_enter(struct ftrace_event_call *call)
 {
 	int num;
 
@@ -371,7 +404,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
 	mutex_unlock(&syscall_trace_lock);
 }
 
-int reg_event_syscall_exit(struct ftrace_event_call *call)
+static int reg_event_syscall_exit(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -390,7 +423,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
 	return ret;
 }
 
-void unreg_event_syscall_exit(struct ftrace_event_call *call)
+static void unreg_event_syscall_exit(struct ftrace_event_call *call)
 {
 	int num;
 
@@ -459,7 +492,7 @@ unsigned long __init __weak arch_syscall_addr(int nr)
 	return (unsigned long)sys_call_table[nr];
 }
 
-int __init init_ftrace_syscalls(void)
+static int __init init_ftrace_syscalls(void)
 {
 	struct syscall_metadata *meta;
 	unsigned long addr;
@@ -502,7 +535,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int rctx;
 	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
@@ -578,7 +611,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int rctx;
 	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))