aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorWenji Huang <wenji.huang@oracle.com>2009-02-06 04:33:27 -0500
committerSteven Rostedt <srostedt@redhat.com>2009-02-07 20:03:36 -0500
commit57794a9d48b63e34acbe63282628c9f029603308 (patch)
treeed42d073f82cd91b8d8a331c60814aa699c3293d /kernel/trace
parenta81bd80a0b0a405dc0483e2c428332d69da2c79f (diff)
trace: trivial fixes in comment typos.
Impact: clean up. Fixed several typos in the comments. Signed-off-by: Wenji Huang <wenji.huang@oracle.com> Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace.h6
2 files changed, 6 insertions, 6 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 68610031780b..1796e018fbff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -465,7 +465,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
465 * it is not enabled then do nothing. 465 * it is not enabled then do nothing.
466 * 466 *
467 * If this record is not to be traced and 467 * If this record is not to be traced and
468 * it is enabled then disabled it. 468 * it is enabled then disable it.
469 * 469 *
470 */ 470 */
471 if (rec->flags & FTRACE_FL_NOTRACE) { 471 if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -485,7 +485,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
485 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) 485 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
486 return 0; 486 return 0;
487 487
488 /* Record is not filtered and is not enabled do nothing */ 488 /* Record is not filtered or enabled, do nothing */
489 if (!fl) 489 if (!fl)
490 return 0; 490 return 0;
491 491
@@ -507,7 +507,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
507 507
508 } else { 508 } else {
509 509
510 /* if record is not enabled do nothing */ 510 /* if record is not enabled, do nothing */
511 if (!(rec->flags & FTRACE_FL_ENABLED)) 511 if (!(rec->flags & FTRACE_FL_ENABLED))
512 return 0; 512 return 0;
513 513
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5efc4c707f7e..f92aba52a894 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -616,12 +616,12 @@ extern struct tracer nop_trace;
616 * preempt_enable (after a disable), a schedule might take place 616 * preempt_enable (after a disable), a schedule might take place
617 * causing an infinite recursion. 617 * causing an infinite recursion.
618 * 618 *
619 * To prevent this, we read the need_recshed flag before 619 * To prevent this, we read the need_resched flag before
620 * disabling preemption. When we want to enable preemption we 620 * disabling preemption. When we want to enable preemption we
621 * check the flag, if it is set, then we call preempt_enable_no_resched. 621 * check the flag, if it is set, then we call preempt_enable_no_resched.
622 * Otherwise, we call preempt_enable. 622 * Otherwise, we call preempt_enable.
623 * 623 *
624 * The rational for doing the above is that if need resched is set 624 * The rational for doing the above is that if need_resched is set
625 * and we have yet to reschedule, we are either in an atomic location 625 * and we have yet to reschedule, we are either in an atomic location
626 * (where we do not need to check for scheduling) or we are inside 626 * (where we do not need to check for scheduling) or we are inside
627 * the scheduler and do not want to resched. 627 * the scheduler and do not want to resched.
@@ -642,7 +642,7 @@ static inline int ftrace_preempt_disable(void)
642 * 642 *
643 * This is a scheduler safe way to enable preemption and not miss 643 * This is a scheduler safe way to enable preemption and not miss
644 * any preemption checks. The disabled saved the state of preemption. 644 * any preemption checks. The disabled saved the state of preemption.
645 * If resched is set, then we were either inside an atomic or 645 * If resched is set, then we are either inside an atomic or
646 * are inside the scheduler (we would have already scheduled 646 * are inside the scheduler (we would have already scheduled
647 * otherwise). In this case, we do not want to call normal 647 * otherwise). In this case, we do not want to call normal
648 * preempt_enable, but preempt_enable_no_resched instead. 648 * preempt_enable, but preempt_enable_no_resched instead.