Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c             |  2
-rw-r--r--  kernel/trace/ring_buffer.c        |  2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 65
-rw-r--r--  kernel/trace/trace_selftest.c     | 33
4 files changed, 82 insertions, 20 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7181ad15923b..cd7f76d1eb86 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -809,7 +809,7 @@ static int ftrace_profile_init(void)
 	int cpu;
 	int ret = 0;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ret = ftrace_profile_init_cpu(cpu);
 		if (ret)
 			break;
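
Note (annotation, not part of the diff): the ftrace.c hunk above widens the profiler setup loop from online to possible CPUs, presumably so that per-CPU profile state exists even for CPUs that are offline at init time and hot-plugged in later. A minimal standalone sketch of the same pattern, using hypothetical example_* names rather than the real ftrace_profile_* helpers:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical per-CPU buffers, standing in for the profiler's real state. */
static void *example_buf[NR_CPUS];

static int example_init_cpu(int cpu)
{
	if (example_buf[cpu])
		return 0;
	example_buf[cpu] = kzalloc(4096, GFP_KERNEL);
	return example_buf[cpu] ? 0 : -ENOMEM;
}

static int example_init(void)
{
	int cpu;
	int ret = 0;

	/* Cover every possible CPU, not just those online right now, so a
	 * CPU that comes online later already has its buffer allocated. */
	for_each_possible_cpu(cpu) {
		ret = example_init_cpu(cpu);
		if (ret)
			break;
	}
	return ret;
}
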
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cc2f66f68dc5..294b8a271a04 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2558,7 +2558,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (unlikely(test_time_stamp(delta))) {
 		int local_clock_stable = 1;
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-		local_clock_stable = sched_clock_stable;
+		local_clock_stable = sched_clock_stable();
 #endif
 		WARN_ONCE(delta > (1ULL << 59),
 			  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
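
Note (annotation, not part of the diff): the ring_buffer.c hunk tracks sched_clock_stable changing from an exported integer into a function call elsewhere in this cycle, so the value is now read through sched_clock_stable(). A minimal sketch of the same guard, with a hypothetical helper name and assuming the declaration comes in via <linux/sched.h> as it did on kernels of this era:

#include <linux/kernel.h>
#include <linux/sched.h>	/* sched_clock_stable() declaration at the time */
#include <linux/types.h>

/* Illustrative helper (hypothetical name): decide whether a large
 * timestamp delta is worth warning about, given clock stability. */
static bool example_delta_suspicious(u64 delta)
{
	int clock_stable = 1;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	clock_stable = sched_clock_stable();	/* now a function, not a variable */
#endif
	return clock_stable && delta > (1ULL << 59);
}
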
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fee77e15d815..6e32635e5e57 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
 #include <trace/events/sched.h>
 #include "trace.h"
 
@@ -27,6 +28,8 @@ static int wakeup_cpu;
 static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
+static int wakeup_dl;
+static int tracing_dl = 0;
 
 static arch_spinlock_t wakeup_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -437,6 +440,7 @@ static void __wakeup_reset(struct trace_array *tr)
 {
 	wakeup_cpu = -1;
 	wakeup_prio = -1;
+	tracing_dl = 0;
 
 	if (wakeup_task)
 		put_task_struct(wakeup_task);
@@ -472,9 +476,17 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	tracing_record_cmdline(p);
 	tracing_record_cmdline(current);
 
-	if ((wakeup_rt && !rt_task(p)) ||
-			p->prio >= wakeup_prio ||
-			p->prio >= current->prio)
+	/*
+	 * Semantic is like this:
+	 *  - wakeup tracer handles all tasks in the system, independently
+	 *    from their scheduling class;
+	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
+	 *    sched_rt class;
+	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
+	 */
+	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
+	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
 		return;
 
 	pc = preempt_count();
@@ -486,7 +498,8 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	arch_spin_lock(&wakeup_lock);
 
 	/* check for races. */
-	if (!tracer_enabled || p->prio >= wakeup_prio)
+	if (!tracer_enabled || tracing_dl ||
+	    (!dl_task(p) && p->prio >= wakeup_prio))
 		goto out_locked;
 
 	/* reset the trace */
@@ -496,6 +509,15 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	wakeup_current_cpu = wakeup_cpu;
 	wakeup_prio = p->prio;
 
+	/*
+	 * Once you start tracing a -deadline task, don't bother tracing
+	 * another task until the first one wakes up.
+	 */
+	if (dl_task(p))
+		tracing_dl = 1;
+	else
+		tracing_dl = 0;
+
 	wakeup_task = p;
 	get_task_struct(wakeup_task);
 
@@ -597,16 +619,25 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 
 static int wakeup_tracer_init(struct trace_array *tr)
 {
+	wakeup_dl = 0;
 	wakeup_rt = 0;
 	return __wakeup_tracer_init(tr);
 }
 
 static int wakeup_rt_tracer_init(struct trace_array *tr)
 {
+	wakeup_dl = 0;
 	wakeup_rt = 1;
 	return __wakeup_tracer_init(tr);
 }
 
+static int wakeup_dl_tracer_init(struct trace_array *tr)
+{
+	wakeup_dl = 1;
+	wakeup_rt = 0;
+	return __wakeup_tracer_init(tr);
+}
+
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
@@ -674,6 +705,28 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.use_max_tr	= true,
 };
 
+static struct tracer wakeup_dl_tracer __read_mostly =
+{
+	.name		= "wakeup_dl",
+	.init		= wakeup_dl_tracer_init,
+	.reset		= wakeup_tracer_reset,
+	.start		= wakeup_tracer_start,
+	.stop		= wakeup_tracer_stop,
+	.wait_pipe	= poll_wait_pipe,
+	.print_max	= true,
+	.print_header	= wakeup_print_header,
+	.print_line	= wakeup_print_line,
+	.flags		= &tracer_flags,
+	.set_flag	= wakeup_set_flag,
+	.flag_changed	= wakeup_flag_changed,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_wakeup,
+#endif
+	.open		= wakeup_trace_open,
+	.close		= wakeup_trace_close,
+	.use_max_tr	= true,
+};
+
 __init static int init_wakeup_tracer(void)
 {
 	int ret;
@@ -686,6 +739,10 @@ __init static int init_wakeup_tracer(void)
 	if (ret)
 		return ret;
 
+	ret = register_tracer(&wakeup_dl_tracer);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 core_initcall(init_wakeup_tracer);
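
Note (annotation, not part of the diff): once the wakeup_dl tracer is registered, it can be selected like the existing wakeup/wakeup_rt tracers by writing its name to current_tracer under the tracing directory. The sketch below is a small userspace helper; the /sys/kernel/debug/tracing path is an assumption about where debugfs is mounted, and root privileges are normally required.

#include <stdio.h>

/* Assumed mount point for the tracing files; adjust if tracefs/debugfs
 * lives elsewhere on your system. */
#define TRACING_DIR "/sys/kernel/debug/tracing"

static int write_file(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", TRACING_DIR, name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Select the tracer added by this diff. */
	if (write_file("current_tracer", "wakeup_dl"))
		return 1;
	/* Reset the recorded maximum wakeup latency. */
	write_file("tracing_max_latency", "0");
	puts("wakeup_dl tracer enabled; see " TRACING_DIR "/trace");
	return 0;
}
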
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index a7329b7902f8..e98fca60974f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1022,11 +1022,16 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 #ifdef CONFIG_SCHED_TRACER
 static int trace_wakeup_test_thread(void *data)
 {
-	/* Make this a RT thread, doesn't need to be too high */
-	static const struct sched_param param = { .sched_priority = 5 };
+	/* Make this a -deadline thread */
+	static const struct sched_attr attr = {
+		.sched_policy = SCHED_DEADLINE,
+		.sched_runtime = 100000ULL,
+		.sched_deadline = 10000000ULL,
+		.sched_period = 10000000ULL
+	};
 	struct completion *x = data;
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	sched_setattr(current, &attr);
 
 	/* Make it know we have a new prio */
 	complete(x);
@@ -1040,8 +1045,8 @@ static int trace_wakeup_test_thread(void *data)
 	/* we are awake, now wait to disappear */
 	while (!kthread_should_stop()) {
 		/*
-		 * This is an RT task, do short sleeps to let
-		 * others run.
+		 * This will likely be the system top priority
+		 * task, do short sleeps to let others run.
 		 */
 		msleep(100);
 	}
@@ -1054,21 +1059,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 {
 	unsigned long save_max = tracing_max_latency;
 	struct task_struct *p;
-	struct completion isrt;
+	struct completion is_ready;
 	unsigned long count;
 	int ret;
 
-	init_completion(&isrt);
+	init_completion(&is_ready);
 
-	/* create a high prio thread */
-	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
+	/* create a -deadline thread */
+	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
 	if (IS_ERR(p)) {
 		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
 		return -1;
 	}
 
-	/* make sure the thread is running at an RT prio */
-	wait_for_completion(&isrt);
+	/* make sure the thread is running at -deadline policy */
+	wait_for_completion(&is_ready);
 
 	/* start the tracing */
 	ret = tracer_init(trace, tr);
@@ -1082,19 +1087,19 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 	while (p->on_rq) {
 		/*
-		 * Sleep to make sure the RT thread is asleep too.
+		 * Sleep to make sure the -deadline thread is asleep too.
 		 * On virtual machines we can't rely on timings,
 		 * but we want to make sure this test still works.
 		 */
 		msleep(100);
 	}
 
-	init_completion(&isrt);
+	init_completion(&is_ready);
 
 	wake_up_process(p);
 
 	/* Wait for the task to wake up */
-	wait_for_completion(&isrt);
+	wait_for_completion(&is_ready);
 
 	/* stop the tracing. */
 	tracing_stop();
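
Note (annotation, not part of the diff): the selftest now moves its kthread to SCHED_DEADLINE with the in-kernel sched_setattr()/sched_attr introduced alongside this series. A userspace counterpart would go through the sched_setattr() syscall; the sketch below defines a local copy of the attribute layout and assumes __NR_sched_setattr is available in the installed kernel headers (glibc shipped no wrapper at the time). It reuses the selftest's 100us runtime over a 10ms deadline/period.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Local copy of the uapi sched_attr layout, in case the toolchain's
 * headers predate SCHED_DEADLINE. */
struct sched_attr_local {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr_local attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	attr.sched_runtime = 100000ULL;		/* 100 us, as in the selftest */
	attr.sched_deadline = 10000000ULL;	/* 10 ms */
	attr.sched_period = 10000000ULL;	/* 10 ms */

	/* pid 0 means the calling thread; __NR_sched_setattr must exist
	 * in the installed kernel headers. */
	if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	printf("running as SCHED_DEADLINE, pid %d\n", getpid());
	return 0;
}
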