Diffstat (limited to 'kernel/sched/debug.c')
 -rw-r--r--  kernel/sched/debug.c | 428
 1 file changed, 370 insertions(+), 58 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 641511771ae6..0368c393a336 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -16,6 +16,7 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 #include <linux/mempolicy.h>
+#include <linux/debugfs.h>
 
 #include "sched.h"
 
@@ -58,6 +59,309 @@ static unsigned long nsec_low(unsigned long long nsec)
 
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
+#define SCHED_FEAT(name, enabled) \
+        #name ,
+
+static const char * const sched_feat_names[] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static int sched_feat_show(struct seq_file *m, void *v)
+{
+        int i;
+
+        for (i = 0; i < __SCHED_FEAT_NR; i++) {
+                if (!(sysctl_sched_features & (1UL << i)))
+                        seq_puts(m, "NO_");
+                seq_printf(m, "%s ", sched_feat_names[i]);
+        }
+        seq_puts(m, "\n");
+
+        return 0;
+}
+
+#ifdef HAVE_JUMP_LABEL
+
+#define jump_label_key__true STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
+
+#define SCHED_FEAT(name, enabled) \
+        jump_label_key__##enabled ,
+
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static void sched_feat_disable(int i)
+{
+        static_key_disable(&sched_feat_keys[i]);
+}
+
+static void sched_feat_enable(int i)
+{
+        static_key_enable(&sched_feat_keys[i]);
+}
+#else
+static void sched_feat_disable(int i) { };
+static void sched_feat_enable(int i) { };
+#endif /* HAVE_JUMP_LABEL */
+
+static int sched_feat_set(char *cmp)
+{
+        int i;
+        int neg = 0;
+
+        if (strncmp(cmp, "NO_", 3) == 0) {
+                neg = 1;
+                cmp += 3;
+        }
+
+        for (i = 0; i < __SCHED_FEAT_NR; i++) {
+                if (strcmp(cmp, sched_feat_names[i]) == 0) {
+                        if (neg) {
+                                sysctl_sched_features &= ~(1UL << i);
+                                sched_feat_disable(i);
+                        } else {
+                                sysctl_sched_features |= (1UL << i);
+                                sched_feat_enable(i);
+                        }
+                        break;
+                }
+        }
+
+        return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+                size_t cnt, loff_t *ppos)
+{
+        char buf[64];
+        char *cmp;
+        int i;
+        struct inode *inode;
+
+        if (cnt > 63)
+                cnt = 63;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        buf[cnt] = 0;
+        cmp = strstrip(buf);
+
+        /* Ensure the static_key remains in a consistent state */
+        inode = file_inode(filp);
+        inode_lock(inode);
+        i = sched_feat_set(cmp);
+        inode_unlock(inode);
+        if (i == __SCHED_FEAT_NR)
+                return -EINVAL;
+
+        *ppos += cnt;
+
+        return cnt;
+}
+
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+        return single_open(filp, sched_feat_show, NULL);
+}
+
+static const struct file_operations sched_feat_fops = {
+        .open = sched_feat_open,
+        .write = sched_feat_write,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
+};
+
+static __init int sched_init_debug(void)
+{
+        debugfs_create_file("sched_features", 0644, NULL, NULL,
+                        &sched_feat_fops);
+
+        return 0;
+}
+late_initcall(sched_init_debug);
+
+#ifdef CONFIG_SMP
+
+#ifdef CONFIG_SYSCTL
+
+static struct ctl_table sd_ctl_dir[] = {
+        {
+                .procname = "sched_domain",
+                .mode = 0555,
+        },
+        {}
+};
+
+static struct ctl_table sd_ctl_root[] = {
+        {
+                .procname = "kernel",
+                .mode = 0555,
+                .child = sd_ctl_dir,
+        },
+        {}
+};
+
+static struct ctl_table *sd_alloc_ctl_entry(int n)
+{
+        struct ctl_table *entry =
+                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+        return entry;
+}
+
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+        struct ctl_table *entry;
+
+        /*
+         * In the intermediate directories, both the child directory and
+         * procname are dynamically allocated and could fail but the mode
+         * will always be set. In the lowest directory the names are
+         * static strings and all have proc handlers.
+         */
+        for (entry = *tablep; entry->mode; entry++) {
+                if (entry->child)
+                        sd_free_ctl_entry(&entry->child);
+                if (entry->proc_handler == NULL)
+                        kfree(entry->procname);
+        }
+
+        kfree(*tablep);
+        *tablep = NULL;
+}
+
+static int min_load_idx = 0;
+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+
+static void
+set_table_entry(struct ctl_table *entry,
+                const char *procname, void *data, int maxlen,
+                umode_t mode, proc_handler *proc_handler,
+                bool load_idx)
+{
+        entry->procname = procname;
+        entry->data = data;
+        entry->maxlen = maxlen;
+        entry->mode = mode;
+        entry->proc_handler = proc_handler;
+
+        if (load_idx) {
+                entry->extra1 = &min_load_idx;
+                entry->extra2 = &max_load_idx;
+        }
+}
+
+static struct ctl_table *
+sd_alloc_ctl_domain_table(struct sched_domain *sd)
+{
+        struct ctl_table *table = sd_alloc_ctl_entry(14);
+
+        if (table == NULL)
+                return NULL;
+
+        set_table_entry(&table[0], "min_interval", &sd->min_interval,
+                sizeof(long), 0644, proc_doulongvec_minmax, false);
+        set_table_entry(&table[1], "max_interval", &sd->max_interval,
+                sizeof(long), 0644, proc_doulongvec_minmax, false);
+        set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+                sizeof(int), 0644, proc_dointvec_minmax, true);
+        set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+                sizeof(int), 0644, proc_dointvec_minmax, true);
+        set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+                sizeof(int), 0644, proc_dointvec_minmax, true);
+        set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+                sizeof(int), 0644, proc_dointvec_minmax, true);
+        set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+                sizeof(int), 0644, proc_dointvec_minmax, true);
+        set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+                sizeof(int), 0644, proc_dointvec_minmax, false);
+        set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+                sizeof(int), 0644, proc_dointvec_minmax, false);
+        set_table_entry(&table[9], "cache_nice_tries",
+                &sd->cache_nice_tries,
+                sizeof(int), 0644, proc_dointvec_minmax, false);
+        set_table_entry(&table[10], "flags", &sd->flags,
+                sizeof(int), 0644, proc_dointvec_minmax, false);
+        set_table_entry(&table[11], "max_newidle_lb_cost",
+                &sd->max_newidle_lb_cost,
+                sizeof(long), 0644, proc_doulongvec_minmax, false);
+        set_table_entry(&table[12], "name", sd->name,
+                CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+        /* &table[13] is terminator */
+
+        return table;
+}
+
+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+{
+        struct ctl_table *entry, *table;
+        struct sched_domain *sd;
+        int domain_num = 0, i;
+        char buf[32];
+
+        for_each_domain(cpu, sd)
+                domain_num++;
+        entry = table = sd_alloc_ctl_entry(domain_num + 1);
+        if (table == NULL)
+                return NULL;
+
+        i = 0;
+        for_each_domain(cpu, sd) {
+                snprintf(buf, 32, "domain%d", i);
+                entry->procname = kstrdup(buf, GFP_KERNEL);
+                entry->mode = 0555;
+                entry->child = sd_alloc_ctl_domain_table(sd);
+                entry++;
+                i++;
+        }
+        return table;
+}
+
+static struct ctl_table_header *sd_sysctl_header;
+void register_sched_domain_sysctl(void)
+{
+        int i, cpu_num = num_possible_cpus();
+        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+        char buf[32];
+
+        WARN_ON(sd_ctl_dir[0].child);
+        sd_ctl_dir[0].child = entry;
+
+        if (entry == NULL)
+                return;
+
+        for_each_possible_cpu(i) {
+                snprintf(buf, 32, "cpu%d", i);
+                entry->procname = kstrdup(buf, GFP_KERNEL);
+                entry->mode = 0555;
+                entry->child = sd_alloc_ctl_cpu_table(i);
+                entry++;
+        }
+
+        WARN_ON(sd_sysctl_header);
+        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+}
+
+/* may be called multiple times per register */
+void unregister_sched_domain_sysctl(void)
+{
+        unregister_sysctl_table(sd_sysctl_header);
+        sd_sysctl_header = NULL;
+        if (sd_ctl_dir[0].child)
+                sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
+#endif /* CONFIG_SYSCTL */
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 {
@@ -75,16 +379,18 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
         PN(se->vruntime);
         PN(se->sum_exec_runtime);
 #ifdef CONFIG_SCHEDSTATS
-        PN(se->statistics.wait_start);
-        PN(se->statistics.sleep_start);
-        PN(se->statistics.block_start);
-        PN(se->statistics.sleep_max);
-        PN(se->statistics.block_max);
-        PN(se->statistics.exec_max);
-        PN(se->statistics.slice_max);
-        PN(se->statistics.wait_max);
-        PN(se->statistics.wait_sum);
-        P(se->statistics.wait_count);
+        if (schedstat_enabled()) {
+                PN(se->statistics.wait_start);
+                PN(se->statistics.sleep_start);
+                PN(se->statistics.block_start);
+                PN(se->statistics.sleep_max);
+                PN(se->statistics.block_max);
+                PN(se->statistics.exec_max);
+                PN(se->statistics.slice_max);
+                PN(se->statistics.wait_max);
+                PN(se->statistics.wait_sum);
+                P(se->statistics.wait_count);
+        }
 #endif
         P(se->load.weight);
 #ifdef CONFIG_SMP
@@ -121,17 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                 SPLIT_NS(p->se.vruntime),
                 (long long)(p->nvcsw + p->nivcsw),
                 p->prio);
-#ifdef CONFIG_SCHEDSTATS
-        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-                SPLIT_NS(p->se.statistics.wait_sum),
-                SPLIT_NS(p->se.sum_exec_runtime),
-                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
-#else
+
         SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-                0LL, 0L,
+                SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
                 SPLIT_NS(p->se.sum_exec_runtime),
-                0LL, 0L);
-#endif
+                SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
 #ifdef CONFIG_NUMA_BALANCING
         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
@@ -258,8 +559,17 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 
 void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 {
+        struct dl_bw *dl_bw;
+
         SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
         SEQ_printf(m, " .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
+#ifdef CONFIG_SMP
+        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
+#else
+        dl_bw = &dl_rq->dl_bw;
+#endif
+        SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
+        SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
 }
 
 extern __read_mostly int sched_clock_running;
@@ -309,24 +619,25 @@ do { \
 #undef P
 #undef PN
 
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
-#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
-
-        P(yld_count);
-
-        P(sched_count);
-        P(sched_goidle);
 #ifdef CONFIG_SMP
+#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
         P64(avg_idle);
         P64(max_idle_balance_cost);
+#undef P64
 #endif
 
-        P(ttwu_count);
-        P(ttwu_local);
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
+
+        if (schedstat_enabled()) {
+                P(yld_count);
+                P(sched_count);
+                P(sched_goidle);
+                P(ttwu_count);
+                P(ttwu_local);
+        }
 
 #undef P
-#undef P64
 #endif
         spin_lock_irqsave(&sched_debug_lock, flags);
         print_cfs_stats(m, cpu);
@@ -569,38 +880,39 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
-        PN(se.statistics.sum_sleep_runtime);
-        PN(se.statistics.wait_start);
-        PN(se.statistics.sleep_start);
-        PN(se.statistics.block_start);
-        PN(se.statistics.sleep_max);
-        PN(se.statistics.block_max);
-        PN(se.statistics.exec_max);
-        PN(se.statistics.slice_max);
-        PN(se.statistics.wait_max);
-        PN(se.statistics.wait_sum);
-        P(se.statistics.wait_count);
-        PN(se.statistics.iowait_sum);
-        P(se.statistics.iowait_count);
         P(se.nr_migrations);
-        P(se.statistics.nr_migrations_cold);
-        P(se.statistics.nr_failed_migrations_affine);
-        P(se.statistics.nr_failed_migrations_running);
-        P(se.statistics.nr_failed_migrations_hot);
-        P(se.statistics.nr_forced_migrations);
-        P(se.statistics.nr_wakeups);
-        P(se.statistics.nr_wakeups_sync);
-        P(se.statistics.nr_wakeups_migrate);
-        P(se.statistics.nr_wakeups_local);
-        P(se.statistics.nr_wakeups_remote);
-        P(se.statistics.nr_wakeups_affine);
-        P(se.statistics.nr_wakeups_affine_attempts);
-        P(se.statistics.nr_wakeups_passive);
-        P(se.statistics.nr_wakeups_idle);
 
-        {
+        if (schedstat_enabled()) {
                 u64 avg_atom, avg_per_cpu;
 
+                PN(se.statistics.sum_sleep_runtime);
+                PN(se.statistics.wait_start);
+                PN(se.statistics.sleep_start);
+                PN(se.statistics.block_start);
+                PN(se.statistics.sleep_max);
+                PN(se.statistics.block_max);
+                PN(se.statistics.exec_max);
+                PN(se.statistics.slice_max);
+                PN(se.statistics.wait_max);
+                PN(se.statistics.wait_sum);
+                P(se.statistics.wait_count);
+                PN(se.statistics.iowait_sum);
+                P(se.statistics.iowait_count);
+                P(se.statistics.nr_migrations_cold);
+                P(se.statistics.nr_failed_migrations_affine);
+                P(se.statistics.nr_failed_migrations_running);
+                P(se.statistics.nr_failed_migrations_hot);
+                P(se.statistics.nr_forced_migrations);
+                P(se.statistics.nr_wakeups);
+                P(se.statistics.nr_wakeups_sync);
+                P(se.statistics.nr_wakeups_migrate);
+                P(se.statistics.nr_wakeups_local);
+                P(se.statistics.nr_wakeups_remote);
+                P(se.statistics.nr_wakeups_affine);
+                P(se.statistics.nr_wakeups_affine_attempts);
+                P(se.statistics.nr_wakeups_passive);
+                P(se.statistics.nr_wakeups_idle);
+
                 avg_atom = p->se.sum_exec_runtime;
                 if (nr_switches)
                         avg_atom = div64_ul(avg_atom, nr_switches);