Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--	kernel/sched_debug.c	123
1 file changed, 75 insertions(+), 48 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 2e1b0d17dd9b..eb6cb8edd075 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+static DEFINE_SPINLOCK(sched_debug_lock);
+
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
@@ -54,8 +56,7 @@ static unsigned long nsec_low(unsigned long long nsec)
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void print_cfs_group_stats(struct seq_file *m, int cpu,
-		struct task_group *tg)
+static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 {
 	struct sched_entity *se = tg->se[cpu];
 	if (!se)
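
For reference, every nanosecond value in this file is printed through SPLIT_NS as "<milliseconds>.<6-digit nanosecond remainder>". A minimal userspace sketch of that split (the real nsec_high()/nsec_low() also handle negative values and use do_div()):

/*
 * Userspace illustration (not kernel code) of the SPLIT_NS output format
 * used throughout sched_debug.c.
 */
#include <stdio.h>

static long long nsec_high(unsigned long long nsec)
{
	return nsec / 1000000;		/* whole milliseconds */
}

static unsigned long nsec_low(unsigned long long nsec)
{
	return nsec % 1000000;		/* leftover nanoseconds, 0..999999 */
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

int main(void)
{
	unsigned long long vruntime = 1234567890ULL;	/* made-up sample value */

	/* Prints "1234.567890", i.e. ~1234.57 ms, matching the debug output. */
	printf("%lld.%06lu\n", SPLIT_NS(vruntime));
	return 0;
}
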
@@ -87,6 +88,26 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
 }
 #endif
 
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+	if (autogroup_path(tg, group_path, PATH_MAX))
+		return group_path;
+
+	/*
+	 * May be NULL if the underlying cgroup isn't fully-created yet
+	 */
+	if (!tg->css.cgroup) {
+		group_path[0] = '\0';
+		return group_path;
+	}
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
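
For reference, the new task_group_path() is a fallback chain writing into one shared static buffer: the autogroup name wins, then the cgroup path, and an empty string while the cgroup is still being created. A rough userspace sketch of the same control flow, with stand-in resolvers instead of the kernel's autogroup_path()/cgroup_path():

#include <limits.h>
#include <stdio.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* Stand-in for struct task_group; the two fields model what the real
 * code learns from autogroup_path() and tg->css.cgroup. */
struct task_group_stub {
	const char *autogroup_name;	/* non-NULL for an autogroup */
	const char *cgroup_path;	/* NULL until the cgroup is fully created */
};

static char group_path[PATH_MAX];	/* one shared buffer, as in the patch */

static char *task_group_path_stub(const struct task_group_stub *tg)
{
	if (tg->autogroup_name) {		/* autogroup_path() succeeded */
		snprintf(group_path, sizeof(group_path), "%s", tg->autogroup_name);
		return group_path;
	}
	if (!tg->cgroup_path) {			/* cgroup not fully created yet */
		group_path[0] = '\0';
		return group_path;
	}
	snprintf(group_path, sizeof(group_path), "%s", tg->cgroup_path);
	return group_path;
}

int main(void)
{
	struct task_group_stub ag    = { "/autogroup-42", NULL };
	struct task_group_stub cg    = { NULL, "/system/foo" };
	struct task_group_stub young = { NULL, NULL };

	/* Every result aliases the same buffer, so callers must not overlap;
	 * in the patch that is what sched_debug_lock is for. */
	printf("[%s]\n", task_group_path_stub(&ag));
	printf("[%s]\n", task_group_path_stub(&cg));
	printf("[%s]\n", task_group_path_stub(&young));
	return 0;
}
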
@@ -109,17 +130,10 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
-
 #ifdef CONFIG_CGROUP_SCHED
-	{
-		char path[64];
-
-		rcu_read_lock();
-		cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
-		rcu_read_unlock();
-		SEQ_printf(m, " %s", path);
-	}
+	SEQ_printf(m, " %s", task_group_path(task_group(p)));
 #endif
+
 	SEQ_printf(m, "\n");
 }
 
@@ -147,19 +161,6 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
-#if defined(CONFIG_CGROUP_SCHED) && \
-	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
-static void task_group_path(struct task_group *tg, char *buf, int buflen)
-{
-	/* may be NULL if the underlying cgroup isn't fully-created yet */
-	if (!tg->css.cgroup) {
-		buf[0] = '\0';
-		return;
-	}
-	cgroup_path(tg->css.cgroup, buf, buflen);
-}
-#endif
-
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -168,13 +169,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	struct sched_entity *last;
 	unsigned long flags;
 
-#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128];
-	struct task_group *tg = cfs_rq->tg;
-
-	task_group_path(tg, path, sizeof(path));
-
-	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
 #else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
@@ -202,33 +198,34 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	spread0 = min_vruntime - rq0_min_vruntime;
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
 			SPLIT_NS(spread0));
-	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
-	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
+	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
+			SPLIT_NS(cfs_rq->load_avg));
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
+			SPLIT_NS(cfs_rq->load_period));
+	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
+			cfs_rq->load_contribution);
+	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
+			atomic_read(&cfs_rq->tg->load_weight));
 #endif
+
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
 
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
-#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128];
-	struct task_group *tg = rt_rq->tg;
-
-	task_group_path(tg, path, sizeof(path));
-
-	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
+#ifdef CONFIG_RT_GROUP_SCHED
+	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
 #else
 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
 #endif
 
-
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
 #define PN(x) \
@@ -243,9 +240,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 #undef P
 }
 
+extern __read_mostly int sched_clock_running;
+
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
 #ifdef CONFIG_X86
 	{
@@ -296,14 +296,20 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(ttwu_count);
 	P(ttwu_local);
 
-	P(bkl_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+		   rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
+	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
+	rcu_read_lock();
 	print_rq(m, rq, cpu);
+	rcu_read_unlock();
+	spin_unlock_irqrestore(&sched_debug_lock, flags);
 }
 
 static const char *sched_tunable_scaling_names[] = {
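
For reference, print_cpu() now holds sched_debug_lock around the stats printing because print_cfs_stats(), print_rt_stats() and print_rq() all format group names into the single static group_path[] buffer added above, and /proc/sched_debug readers can run concurrently with the sysrq dump; the added rcu_read_lock() keeps the task and group references used by print_rq() valid. A rough userspace (pthreads, not kernel) analogue of serializing a shared formatting buffer:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_path[64];	/* stands in for group_path[] */

static void print_group(int cpu, const char *name)
{
	pthread_mutex_lock(&debug_lock);	/* like spin_lock_irqsave(&sched_debug_lock, flags) */
	snprintf(shared_path, sizeof(shared_path), "/%s", name);
	printf("cfs_rq[%d]:%s\n", cpu, shared_path);
	pthread_mutex_unlock(&debug_lock);	/* like spin_unlock_irqrestore() */
}

static void *reader(void *arg)
{
	print_group((int)(long)arg, "system");
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	/* Two concurrent "readers"; the lock keeps their use of the shared
	 * buffer from interleaving. */
	pthread_create(&t[0], NULL, reader, (void *)0L);
	pthread_create(&t[1], NULL, reader, (void *)1L);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}
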
@@ -314,21 +320,42 @@ static const char *sched_tunable_scaling_names[] = {
 
 static int sched_debug_show(struct seq_file *m, void *v)
 {
-	u64 now = ktime_to_ns(ktime_get());
+	u64 ktime, sched_clk, cpu_clk;
+	unsigned long flags;
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
+	local_irq_save(flags);
+	ktime = ktime_to_ns(ktime_get());
+	sched_clk = sched_clock();
+	cpu_clk = local_clock();
+	local_irq_restore(flags);
+
+	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
 
-	SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));
+#define P(x) \
+	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
+#define PN(x) \
+	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+	PN(ktime);
+	PN(sched_clk);
+	PN(cpu_clk);
+	P(jiffies);
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+	P(sched_clock_stable);
+#endif
+#undef PN
+#undef P
+
+	SEQ_printf(m, "\n");
+	SEQ_printf(m, "sysctl_sched\n");
 
 #define P(x) \
 	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
-	P(jiffies);
 	PN(sysctl_sched_latency);
 	PN(sysctl_sched_min_granularity);
 	PN(sysctl_sched_wakeup_granularity);
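
For reference, the reworked sched_debug_show() header samples ktime_get(), sched_clock() and local_clock() back-to-back under local_irq_save() so the three timestamps describe one instant, then prints them in the same ns-to-"ms.fraction" style. A rough userspace analogue of taking such a multi-clock snapshot (clock_gettime() clocks stand in for the kernel ones; userspace has no interrupt masking, so the samples are only approximately simultaneous):

#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	/* Take all samples as close together as possible. */
	unsigned long long ktime    = now_ns(CLOCK_MONOTONIC);
	unsigned long long cpu_time = now_ns(CLOCK_PROCESS_CPUTIME_ID);
	unsigned long long realtime = now_ns(CLOCK_REALTIME);

	/* Same "%-40s: <ms>.<6-digit remainder>" layout as the debug header. */
	printf("%-40s: %llu.%06llu\n", "ktime", ktime / 1000000, ktime % 1000000);
	printf("%-40s: %llu.%06llu\n", "cpu_time", cpu_time / 1000000, cpu_time % 1000000);
	printf("%-40s: %llu.%06llu\n", "realtime", realtime / 1000000, realtime % 1000000);
	return 0;
}
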