Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--	kernel/trace/trace.h	49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9911277b268b..cc14a6bc1094 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -416,8 +416,57 @@ enum trace_iterator_flags {
 	TRACE_ITER_STACKTRACE		= 0x100,
 	TRACE_ITER_SCHED_TREE		= 0x200,
 	TRACE_ITER_PRINTK		= 0x400,
+	TRACE_ITER_PREEMPTONLY		= 0x800,
 };
 
 extern struct tracer nop_trace;
 
+/**
+ * ftrace_preempt_disable - disable preemption, scheduler safe
+ *
+ * When tracing can happen inside the scheduler, there are
+ * cases where the tracing might happen before the need_resched
+ * flag is checked. If this happens and the tracer calls
+ * preempt_enable (after a disable), a schedule might take place,
+ * causing an infinite recursion.
+ *
+ * To prevent this, we read the need_resched flag before
+ * disabling preemption. When we want to enable preemption we
+ * check the flag; if it is set, we call preempt_enable_no_resched.
+ * Otherwise, we call preempt_enable.
+ *
+ * The rationale for doing the above is that if need_resched is set
+ * and we have yet to reschedule, we are either in an atomic location
+ * (where we do not need to check for scheduling) or we are inside
+ * the scheduler and do not want to resched.
+ */
+static inline int ftrace_preempt_disable(void)
+{
+	int resched;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	return resched;
+}
+
+/**
+ * ftrace_preempt_enable - enable preemption, scheduler safe
+ * @resched: the return value from ftrace_preempt_disable
+ *
+ * This is a scheduler safe way to enable preemption without missing
+ * any preemption checks. The matching disable call saved the state
+ * of need_resched. If @resched is set, then we were either inside
+ * an atomic section or inside the scheduler (we would have already
+ * scheduled otherwise). In that case, we do not want to call the
+ * normal preempt_enable, but preempt_enable_no_resched instead.
+ */
+static inline void ftrace_preempt_enable(int resched)
+{
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
 #endif /* _LINUX_KERNEL_TRACE_H */
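
For context, these two helpers are meant to be used as a pair, bracketing any tracer code that must not trigger a reschedule of its own. A minimal usage sketch, assuming a hypothetical callback (the function name is illustrative and not part of this patch):

/* Hypothetical tracer callback using the helpers added above. */
static void example_trace_event(void)
{
	int resched;

	/* Save the need_resched state, then disable preemption. */
	resched = ftrace_preempt_disable();

	/* ... record the trace entry with preemption disabled ... */

	/*
	 * If need_resched was already set before we disabled
	 * preemption, re-enable without the resched check so we
	 * cannot recurse into the scheduler from here.
	 */
	ftrace_preempt_enable(resched);
}

Pairing the calls this way means a trace event that fires from inside the scheduler sees need_resched already set and skips the preemption check on the way out, avoiding the infinite recursion described in the comment above.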