author		Steven Rostedt <srostedt@redhat.com>	2008-11-07 22:36:02 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-11-08 03:51:49 -0500
commit		769c48eb2530c5c1a393e2c82063f4f050571d24
tree		a32f0718b42ab189c5547dccb0d0c7bb7cfe5c4c	/kernel/trace/trace_selftest.c
parent		7d5222a6afa4e429f55df8c086adb747837cbdf5
ftrace: force pass of preemptoff selftest
Impact: preemptoff not tested in selftest
Because the BKL is no longer preemptable, the preemptoff selftest can
no longer run. The test requires being called with preemption enabled,
but since the BKL is held at that point, that is no longer the case.
This patch simply skips those tests when it detects that the current
context is not preemptable. The following now shows up in the selftest
output:
Testing tracer preemptoff: can not test ... force PASSED
Testing tracer preemptirqsoff: can not test ... force PASSED
When the BKL is removed, or becomes preemptable once again, the tests
will be performed.
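For context, a minimal sketch of why preempt_count() is nonzero in this
situation, assuming the spinlock-based BKL of this era; it is not part
of the patch, and the demo function name is hypothetical:

	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */
	#include <linux/hardirq.h>	/* preempt_count() */

	/*
	 * Sketch only (not from the patch): with the spinlock-based BKL,
	 * holding the lock disables preemption, so a selftest invoked
	 * under it sees a nonzero preempt_count() and cannot be run with
	 * preemption enabled.
	 */
	static void bkl_preempt_demo(void)
	{
		lock_kernel();			/* spin_lock() -> preempt_disable() */
		WARN_ON(preempt_count() == 0);	/* nonzero while the BKL is held */
		unlock_kernel();		/* spin_unlock() -> preempt_enable() */
	}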
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_selftest.c')
 kernel/trace/trace_selftest.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index ea4e5d3b15df..0728a105dcc1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -257,6 +257,19 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	unsigned long count;
 	int ret;
 
+	/*
+	 * Now that the big kernel lock is no longer preemptable,
+	 * and this is called with the BKL held, it will always
+	 * fail. If preemption is already disabled, simply
+	 * pass the test. When the BKL is removed, or becomes
+	 * preemptible again, we will once again test this,
+	 * so keep it in.
+	 */
+	if (preempt_count()) {
+		printk(KERN_CONT "can not test ... force ");
+		return 0;
+	}
+
 	/* start the tracing */
 	trace->init(tr);
 	/* reset the max latency */
@@ -293,6 +306,19 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	unsigned long count;
 	int ret;
 
+	/*
+	 * Now that the big kernel lock is no longer preemptable,
+	 * and this is called with the BKL held, it will always
+	 * fail. If preemption is already disabled, simply
+	 * pass the test. When the BKL is removed, or becomes
+	 * preemptible again, we will once again test this,
+	 * so keep it in.
+	 */
+	if (preempt_count()) {
+		printk(KERN_CONT "can not test ... force ");
+		return 0;
+	}
+
 	/* start the tracing */
 	trace->init(tr);
 
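Why a bare `return 0` shows up as "force PASSED": the selftest return
value is consumed by register_tracer(), where zero means success. A
simplified sketch of that caller-side logic, paraphrased from
kernel/trace/trace.c of this era (not part of this patch):

	/*
	 * Simplified sketch of the caller in register_tracer(): a zero
	 * return from the selftest is reported as PASSED, so the guard
	 * added above turns "can not test ... force " into the full
	 * "can not test ... force PASSED" line seen in the log.
	 */
	printk(KERN_INFO "Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	if (ret) {
		printk(KERN_CONT "FAILED!\n");	/* tracer is not registered */
		goto out;
	}
	printk(KERN_CONT "PASSED\n");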