Diffstat (limited to 'kernel/trace/trace_syscalls.c')
 -rw-r--r--  kernel/trace/trace_syscalls.c | 24
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index d036a74a64f3..b8d30e7ecd05 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -247,7 +247,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
         return ret;
 }
 
-void ftrace_syscall_enter(struct pt_regs *regs, long id)
+void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 {
         struct syscall_trace_enter *entry;
         struct syscall_metadata *sys_data;
@@ -282,7 +282,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
         trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
-void ftrace_syscall_exit(struct pt_regs *regs, long ret)
+void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 {
         struct syscall_trace_exit *entry;
         struct syscall_metadata *sys_data;
@@ -324,7 +324,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
                 return -ENOSYS;
         mutex_lock(&syscall_trace_lock);
         if (!sys_refcount_enter)
-                ret = register_trace_sys_enter(ftrace_syscall_enter);
+                ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
         if (!ret) {
                 set_bit(num, enabled_enter_syscalls);
                 sys_refcount_enter++;
@@ -344,7 +344,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
         sys_refcount_enter--;
         clear_bit(num, enabled_enter_syscalls);
         if (!sys_refcount_enter)
-                unregister_trace_sys_enter(ftrace_syscall_enter);
+                unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
         mutex_unlock(&syscall_trace_lock);
 }
 
@@ -358,7 +358,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
                 return -ENOSYS;
         mutex_lock(&syscall_trace_lock);
         if (!sys_refcount_exit)
-                ret = register_trace_sys_exit(ftrace_syscall_exit);
+                ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
         if (!ret) {
                 set_bit(num, enabled_exit_syscalls);
                 sys_refcount_exit++;
@@ -378,7 +378,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
         sys_refcount_exit--;
         clear_bit(num, enabled_exit_syscalls);
         if (!sys_refcount_exit)
-                unregister_trace_sys_exit(ftrace_syscall_exit);
+                unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
         mutex_unlock(&syscall_trace_lock);
 }
 
@@ -438,7 +438,7 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
 static int sys_perf_refcount_enter;
 static int sys_perf_refcount_exit;
 
-static void perf_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 {
         struct syscall_metadata *sys_data;
         struct syscall_trace_enter *rec;
@@ -484,7 +484,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
 
         mutex_lock(&syscall_trace_lock);
         if (!sys_perf_refcount_enter)
-                ret = register_trace_sys_enter(perf_syscall_enter);
+                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
         if (ret) {
                 pr_info("event trace: Could not activate"
                                 "syscall entry trace point");
@@ -506,11 +506,11 @@ void perf_sysenter_disable(struct ftrace_event_call *call)
         sys_perf_refcount_enter--;
         clear_bit(num, enabled_perf_enter_syscalls);
         if (!sys_perf_refcount_enter)
-                unregister_trace_sys_enter(perf_syscall_enter);
+                unregister_trace_sys_enter(perf_syscall_enter, NULL);
         mutex_unlock(&syscall_trace_lock);
 }
 
-static void perf_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 {
         struct syscall_metadata *sys_data;
         struct syscall_trace_exit *rec;
@@ -559,7 +559,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
 
         mutex_lock(&syscall_trace_lock);
         if (!sys_perf_refcount_exit)
-                ret = register_trace_sys_exit(perf_syscall_exit);
+                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
         if (ret) {
                 pr_info("event trace: Could not activate"
                                 "syscall exit trace point");
@@ -581,7 +581,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
         sys_perf_refcount_exit--;
         clear_bit(num, enabled_perf_exit_syscalls);
         if (!sys_perf_refcount_exit)
-                unregister_trace_sys_exit(perf_syscall_exit);
+                unregister_trace_sys_exit(perf_syscall_exit, NULL);
         mutex_unlock(&syscall_trace_lock);
 }
 
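
Context for the change above: tracepoint probes now receive, as their first argument, the private data pointer that was handed to the register call; the syscall event and perf probes in this file need no per-probe state, so they name the argument "ignore" and register with NULL. The sketch below (not part of the patch) shows how a caller that does want per-probe state might use the new convention; the names my_ctx, my_sys_enter_probe, my_probe_init and my_probe_exit are hypothetical, while the probe signature and register_trace_sys_enter()/unregister_trace_sys_enter() follow the tracepoint API visible in the diff, assuming a kernel with syscall tracepoints enabled.

/*
 * Minimal sketch of a post-change tracepoint user; assumes
 * CONFIG_HAVE_SYSCALL_TRACEPOINTS and <trace/events/syscalls.h>.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <trace/events/syscalls.h>

/* Private state delivered to the probe through the new void *data argument. */
struct my_ctx {
        unsigned long hits;     /* plain counter, not concurrency-safe; sketch only */
};

static struct my_ctx my_ctx;

/* New probe signature: the pointer passed at registration arrives first. */
static void my_sys_enter_probe(void *data, struct pt_regs *regs, long id)
{
        struct my_ctx *ctx = data;

        ctx->hits++;
        pr_debug("sys_enter: nr=%ld hits=%lu\n", id, ctx->hits);
}

static int my_probe_init(void)
{
        /* Second argument is the per-probe data; pass NULL when unused. */
        return register_trace_sys_enter(my_sys_enter_probe, &my_ctx);
}

static void my_probe_exit(void)
{
        unregister_trace_sys_enter(my_sys_enter_probe, &my_ctx);
        /* Wait for in-flight probe calls before freeing any probe data. */
        tracepoint_synchronize_unregister();
}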