diff options
author | Ian Munsie <imunsie@au1.ibm.com> | 2011-02-02 22:27:21 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2011-02-07 21:25:52 -0500 |
commit | 3773b389b6927595512558594d040c1edba46f36 (patch) | |
tree | 5099c6ea72db515179ef568c2fea9765f51bc6f0 | |
parent | ba976970c79fd2fbfe1a4b3b6766a318f4eb9d4c (diff) |
tracing/syscalls: Convert redundant syscall_nr checks into WARN_ON
With the ftrace events now checking if the syscall_nr is valid upon
initialisation it should no longer be possible to register or unregister
a syscall event without a valid syscall_nr since they should not be
created. This adds a WARN_ON_ONCE in the register and unregister
functions to locate potential regressions in the future.
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
LKML-Reference: <1296703645-18718-3-git-send-email-imunsie@au1.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/trace_syscalls.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a9ceabd52247..423094288fb5 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -359,7 +359,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
359 | int num; | 359 | int num; |
360 | 360 | ||
361 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 361 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
362 | if (num < 0 || num >= NR_syscalls) | 362 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
363 | return -ENOSYS; | 363 | return -ENOSYS; |
364 | mutex_lock(&syscall_trace_lock); | 364 | mutex_lock(&syscall_trace_lock); |
365 | if (!sys_refcount_enter) | 365 | if (!sys_refcount_enter) |
@@ -377,7 +377,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) | |||
377 | int num; | 377 | int num; |
378 | 378 | ||
379 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 379 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
380 | if (num < 0 || num >= NR_syscalls) | 380 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
381 | return; | 381 | return; |
382 | mutex_lock(&syscall_trace_lock); | 382 | mutex_lock(&syscall_trace_lock); |
383 | sys_refcount_enter--; | 383 | sys_refcount_enter--; |
@@ -393,7 +393,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
393 | int num; | 393 | int num; |
394 | 394 | ||
395 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 395 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
396 | if (num < 0 || num >= NR_syscalls) | 396 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
397 | return -ENOSYS; | 397 | return -ENOSYS; |
398 | mutex_lock(&syscall_trace_lock); | 398 | mutex_lock(&syscall_trace_lock); |
399 | if (!sys_refcount_exit) | 399 | if (!sys_refcount_exit) |
@@ -411,7 +411,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) | |||
411 | int num; | 411 | int num; |
412 | 412 | ||
413 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 413 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
414 | if (num < 0 || num >= NR_syscalls) | 414 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
415 | return; | 415 | return; |
416 | mutex_lock(&syscall_trace_lock); | 416 | mutex_lock(&syscall_trace_lock); |
417 | sys_refcount_exit--; | 417 | sys_refcount_exit--; |