diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 11:22:16 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-15 11:22:16 -0400 |
commit | b5869ce7f68b233ceb81465a7644be0d9a5f3dbb (patch) | |
tree | e3611e7f038a4a4fa813532ae57a9a626fa1434d /kernel/sched_debug.c | |
parent | df3d80f5a5c74168be42788364d13cf6c83c7b9c (diff) | |
parent | 9c63d9c021f375a2708ad79043d6f4dd1291a085 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched: (140 commits)
sched: sync wakeups preempt too
sched: affine sync wakeups
sched: guest CPU accounting: maintain guest state in KVM
sched: guest CPU accounting: maintain stats in account_system_time()
sched: guest CPU accounting: add guest-CPU /proc/<pid>/stat fields
sched: guest CPU accounting: add guest-CPU /proc/stat field
sched: domain sysctl fixes: add terminator comment
sched: domain sysctl fixes: do not crash on allocation failure
sched: domain sysctl fixes: unregister the sysctl table before domains
sched: domain sysctl fixes: use for_each_online_cpu()
sched: domain sysctl fixes: use kcalloc()
Make scheduler debug file operations const
sched: enable wake-idle on CONFIG_SCHED_MC=y
sched: reintroduce topology.h tunings
sched: allow the immediate migration of cache-cold tasks
sched: debug, improve migration statistics
sched: debug: increase width of debug line
sched: activate task_hot() only on fair-scheduled tasks
sched: reintroduce cache-hot affinity
sched: speed up context-switches a bit
...
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r-- | kernel/sched_debug.c | 282 |
1 file changed, 194 insertions, 88 deletions
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index c3ee38bd3426..a5e517ec07c3 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -28,6 +28,31 @@ | |||
28 | printk(x); \ | 28 | printk(x); \ |
29 | } while (0) | 29 | } while (0) |
30 | 30 | ||
31 | /* | ||
32 | * Ease the printing of nsec fields: | ||
33 | */ | ||
34 | static long long nsec_high(long long nsec) | ||
35 | { | ||
36 | if (nsec < 0) { | ||
37 | nsec = -nsec; | ||
38 | do_div(nsec, 1000000); | ||
39 | return -nsec; | ||
40 | } | ||
41 | do_div(nsec, 1000000); | ||
42 | |||
43 | return nsec; | ||
44 | } | ||
45 | |||
46 | static unsigned long nsec_low(long long nsec) | ||
47 | { | ||
48 | if (nsec < 0) | ||
49 | nsec = -nsec; | ||
50 | |||
51 | return do_div(nsec, 1000000); | ||
52 | } | ||
53 | |||
54 | #define SPLIT_NS(x) nsec_high(x), nsec_low(x) | ||
55 | |||
31 | static void | 56 | static void |
32 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | 57 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
33 | { | 58 | { |
@@ -36,23 +61,19 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
36 | else | 61 | else |
37 | SEQ_printf(m, " "); | 62 | SEQ_printf(m, " "); |
38 | 63 | ||
39 | SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ", | 64 | SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", |
40 | p->comm, p->pid, | 65 | p->comm, p->pid, |
41 | (long long)p->se.fair_key, | 66 | SPLIT_NS(p->se.vruntime), |
42 | (long long)(p->se.fair_key - rq->cfs.fair_clock), | ||
43 | (long long)p->se.wait_runtime, | ||
44 | (long long)(p->nvcsw + p->nivcsw), | 67 | (long long)(p->nvcsw + p->nivcsw), |
45 | p->prio); | 68 | p->prio); |
46 | #ifdef CONFIG_SCHEDSTATS | 69 | #ifdef CONFIG_SCHEDSTATS |
47 | SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n", | 70 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n", |
48 | (long long)p->se.sum_exec_runtime, | 71 | SPLIT_NS(p->se.vruntime), |
49 | (long long)p->se.sum_wait_runtime, | 72 | SPLIT_NS(p->se.sum_exec_runtime), |
50 | (long long)p->se.sum_sleep_runtime, | 73 | SPLIT_NS(p->se.sum_sleep_runtime)); |
51 | (long long)p->se.wait_runtime_overruns, | ||
52 | (long long)p->se.wait_runtime_underruns); | ||
53 | #else | 74 | #else |
54 | SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n", | 75 | SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n", |
55 | 0LL, 0LL, 0LL, 0LL, 0LL); | 76 | 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); |
56 | #endif | 77 | #endif |
57 | } | 78 | } |
58 | 79 | ||
@@ -62,14 +83,10 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) | |||
62 | 83 | ||
63 | SEQ_printf(m, | 84 | SEQ_printf(m, |
64 | "\nrunnable tasks:\n" | 85 | "\nrunnable tasks:\n" |
65 | " task PID tree-key delta waiting" | 86 | " task PID tree-key switches prio" |
66 | " switches prio" | 87 | " exec-runtime sum-exec sum-sleep\n" |
67 | " sum-exec sum-wait sum-sleep" | 88 | "------------------------------------------------------" |
68 | " wait-overrun wait-underrun\n" | 89 | "----------------------------------------------------\n"); |
69 | "------------------------------------------------------------------" | ||
70 | "----------------" | ||
71 | "------------------------------------------------" | ||
72 | "--------------------------------\n"); | ||
73 | 90 | ||
74 | read_lock_irq(&tasklist_lock); | 91 | read_lock_irq(&tasklist_lock); |
75 | 92 | ||
@@ -83,45 +100,48 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) | |||
83 | read_unlock_irq(&tasklist_lock); | 100 | read_unlock_irq(&tasklist_lock); |
84 | } | 101 | } |
85 | 102 | ||
86 | static void | 103 | void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) |
87 | print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | ||
88 | { | 104 | { |
89 | s64 wait_runtime_rq_sum = 0; | 105 | s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, |
90 | struct task_struct *p; | 106 | spread, rq0_min_vruntime, spread0; |
91 | struct rb_node *curr; | ||
92 | unsigned long flags; | ||
93 | struct rq *rq = &per_cpu(runqueues, cpu); | 107 | struct rq *rq = &per_cpu(runqueues, cpu); |
108 | struct sched_entity *last; | ||
109 | unsigned long flags; | ||
94 | 110 | ||
95 | spin_lock_irqsave(&rq->lock, flags); | ||
96 | curr = first_fair(cfs_rq); | ||
97 | while (curr) { | ||
98 | p = rb_entry(curr, struct task_struct, se.run_node); | ||
99 | wait_runtime_rq_sum += p->se.wait_runtime; | ||
100 | |||
101 | curr = rb_next(curr); | ||
102 | } | ||
103 | spin_unlock_irqrestore(&rq->lock, flags); | ||
104 | |||
105 | SEQ_printf(m, " .%-30s: %Ld\n", "wait_runtime_rq_sum", | ||
106 | (long long)wait_runtime_rq_sum); | ||
107 | } | ||
108 | |||
109 | void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | ||
110 | { | ||
111 | SEQ_printf(m, "\ncfs_rq\n"); | 111 | SEQ_printf(m, "\ncfs_rq\n"); |
112 | 112 | ||
113 | #define P(x) \ | 113 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", |
114 | SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x)) | 114 | SPLIT_NS(cfs_rq->exec_clock)); |
115 | |||
116 | P(fair_clock); | ||
117 | P(exec_clock); | ||
118 | P(wait_runtime); | ||
119 | P(wait_runtime_overruns); | ||
120 | P(wait_runtime_underruns); | ||
121 | P(sleeper_bonus); | ||
122 | #undef P | ||
123 | 115 | ||
124 | print_cfs_rq_runtime_sum(m, cpu, cfs_rq); | 116 | spin_lock_irqsave(&rq->lock, flags); |
117 | if (cfs_rq->rb_leftmost) | ||
118 | MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; | ||
119 | last = __pick_last_entity(cfs_rq); | ||
120 | if (last) | ||
121 | max_vruntime = last->vruntime; | ||
122 | min_vruntime = rq->cfs.min_vruntime; | ||
123 | rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime; | ||
124 | spin_unlock_irqrestore(&rq->lock, flags); | ||
125 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", | ||
126 | SPLIT_NS(MIN_vruntime)); | ||
127 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", | ||
128 | SPLIT_NS(min_vruntime)); | ||
129 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime", | ||
130 | SPLIT_NS(max_vruntime)); | ||
131 | spread = max_vruntime - MIN_vruntime; | ||
132 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", | ||
133 | SPLIT_NS(spread)); | ||
134 | spread0 = min_vruntime - rq0_min_vruntime; | ||
135 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0", | ||
136 | SPLIT_NS(spread0)); | ||
137 | SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); | ||
138 | SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); | ||
139 | #ifdef CONFIG_SCHEDSTATS | ||
140 | SEQ_printf(m, " .%-30s: %ld\n", "bkl_count", | ||
141 | rq->bkl_count); | ||
142 | #endif | ||
143 | SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", | ||
144 | cfs_rq->nr_spread_over); | ||
125 | } | 145 | } |
126 | 146 | ||
127 | static void print_cpu(struct seq_file *m, int cpu) | 147 | static void print_cpu(struct seq_file *m, int cpu) |
@@ -141,31 +161,32 @@ static void print_cpu(struct seq_file *m, int cpu) | |||
141 | 161 | ||
142 | #define P(x) \ | 162 | #define P(x) \ |
143 | SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x)) | 163 | SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x)) |
164 | #define PN(x) \ | ||
165 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x)) | ||
144 | 166 | ||
145 | P(nr_running); | 167 | P(nr_running); |
146 | SEQ_printf(m, " .%-30s: %lu\n", "load", | 168 | SEQ_printf(m, " .%-30s: %lu\n", "load", |
147 | rq->ls.load.weight); | 169 | rq->load.weight); |
148 | P(ls.delta_fair); | ||
149 | P(ls.delta_exec); | ||
150 | P(nr_switches); | 170 | P(nr_switches); |
151 | P(nr_load_updates); | 171 | P(nr_load_updates); |
152 | P(nr_uninterruptible); | 172 | P(nr_uninterruptible); |
153 | SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies); | 173 | SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies); |
154 | P(next_balance); | 174 | PN(next_balance); |
155 | P(curr->pid); | 175 | P(curr->pid); |
156 | P(clock); | 176 | PN(clock); |
157 | P(idle_clock); | 177 | PN(idle_clock); |
158 | P(prev_clock_raw); | 178 | PN(prev_clock_raw); |
159 | P(clock_warps); | 179 | P(clock_warps); |
160 | P(clock_overflows); | 180 | P(clock_overflows); |
161 | P(clock_deep_idle_events); | 181 | P(clock_deep_idle_events); |
162 | P(clock_max_delta); | 182 | PN(clock_max_delta); |
163 | P(cpu_load[0]); | 183 | P(cpu_load[0]); |
164 | P(cpu_load[1]); | 184 | P(cpu_load[1]); |
165 | P(cpu_load[2]); | 185 | P(cpu_load[2]); |
166 | P(cpu_load[3]); | 186 | P(cpu_load[3]); |
167 | P(cpu_load[4]); | 187 | P(cpu_load[4]); |
168 | #undef P | 188 | #undef P |
189 | #undef PN | ||
169 | 190 | ||
170 | print_cfs_stats(m, cpu); | 191 | print_cfs_stats(m, cpu); |
171 | 192 | ||
@@ -177,12 +198,25 @@ static int sched_debug_show(struct seq_file *m, void *v) | |||
177 | u64 now = ktime_to_ns(ktime_get()); | 198 | u64 now = ktime_to_ns(ktime_get()); |
178 | int cpu; | 199 | int cpu; |
179 | 200 | ||
180 | SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n", | 201 | SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n", |
181 | init_utsname()->release, | 202 | init_utsname()->release, |
182 | (int)strcspn(init_utsname()->version, " "), | 203 | (int)strcspn(init_utsname()->version, " "), |
183 | init_utsname()->version); | 204 | init_utsname()->version); |
184 | 205 | ||
185 | SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now); | 206 | SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now)); |
207 | |||
208 | #define P(x) \ | ||
209 | SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) | ||
210 | #define PN(x) \ | ||
211 | SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) | ||
212 | PN(sysctl_sched_latency); | ||
213 | PN(sysctl_sched_nr_latency); | ||
214 | PN(sysctl_sched_wakeup_granularity); | ||
215 | PN(sysctl_sched_batch_wakeup_granularity); | ||
216 | PN(sysctl_sched_child_runs_first); | ||
217 | P(sysctl_sched_features); | ||
218 | #undef PN | ||
219 | #undef P | ||
186 | 220 | ||
187 | for_each_online_cpu(cpu) | 221 | for_each_online_cpu(cpu) |
188 | print_cpu(m, cpu); | 222 | print_cpu(m, cpu); |
@@ -202,7 +236,7 @@ static int sched_debug_open(struct inode *inode, struct file *filp) | |||
202 | return single_open(filp, sched_debug_show, NULL); | 236 | return single_open(filp, sched_debug_show, NULL); |
203 | } | 237 | } |
204 | 238 | ||
205 | static struct file_operations sched_debug_fops = { | 239 | static const struct file_operations sched_debug_fops = { |
206 | .open = sched_debug_open, | 240 | .open = sched_debug_open, |
207 | .read = seq_read, | 241 | .read = seq_read, |
208 | .llseek = seq_lseek, | 242 | .llseek = seq_lseek, |
@@ -226,6 +260,7 @@ __initcall(init_sched_debug_procfs); | |||
226 | 260 | ||
227 | void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | 261 | void proc_sched_show_task(struct task_struct *p, struct seq_file *m) |
228 | { | 262 | { |
263 | unsigned long nr_switches; | ||
229 | unsigned long flags; | 264 | unsigned long flags; |
230 | int num_threads = 1; | 265 | int num_threads = 1; |
231 | 266 | ||
@@ -237,41 +272,89 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
237 | rcu_read_unlock(); | 272 | rcu_read_unlock(); |
238 | 273 | ||
239 | SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); | 274 | SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); |
240 | SEQ_printf(m, "----------------------------------------------\n"); | 275 | SEQ_printf(m, |
276 | "---------------------------------------------------------\n"); | ||
277 | #define __P(F) \ | ||
278 | SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F) | ||
241 | #define P(F) \ | 279 | #define P(F) \ |
242 | SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F) | 280 | SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F) |
281 | #define __PN(F) \ | ||
282 | SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) | ||
283 | #define PN(F) \ | ||
284 | SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) | ||
243 | 285 | ||
244 | P(se.wait_runtime); | 286 | PN(se.exec_start); |
245 | P(se.wait_start_fair); | 287 | PN(se.vruntime); |
246 | P(se.exec_start); | 288 | PN(se.sum_exec_runtime); |
247 | P(se.sleep_start_fair); | 289 | |
248 | P(se.sum_exec_runtime); | 290 | nr_switches = p->nvcsw + p->nivcsw; |
249 | 291 | ||
250 | #ifdef CONFIG_SCHEDSTATS | 292 | #ifdef CONFIG_SCHEDSTATS |
251 | P(se.wait_start); | 293 | PN(se.wait_start); |
252 | P(se.sleep_start); | 294 | PN(se.sleep_start); |
253 | P(se.block_start); | 295 | PN(se.block_start); |
254 | P(se.sleep_max); | 296 | PN(se.sleep_max); |
255 | P(se.block_max); | 297 | PN(se.block_max); |
256 | P(se.exec_max); | 298 | PN(se.exec_max); |
257 | P(se.wait_max); | 299 | PN(se.slice_max); |
258 | P(se.wait_runtime_overruns); | 300 | PN(se.wait_max); |
259 | P(se.wait_runtime_underruns); | 301 | P(sched_info.bkl_count); |
260 | P(se.sum_wait_runtime); | 302 | P(se.nr_migrations); |
303 | P(se.nr_migrations_cold); | ||
304 | P(se.nr_failed_migrations_affine); | ||
305 | P(se.nr_failed_migrations_running); | ||
306 | P(se.nr_failed_migrations_hot); | ||
307 | P(se.nr_forced_migrations); | ||
308 | P(se.nr_forced2_migrations); | ||
309 | P(se.nr_wakeups); | ||
310 | P(se.nr_wakeups_sync); | ||
311 | P(se.nr_wakeups_migrate); | ||
312 | P(se.nr_wakeups_local); | ||
313 | P(se.nr_wakeups_remote); | ||
314 | P(se.nr_wakeups_affine); | ||
315 | P(se.nr_wakeups_affine_attempts); | ||
316 | P(se.nr_wakeups_passive); | ||
317 | P(se.nr_wakeups_idle); | ||
318 | |||
319 | { | ||
320 | u64 avg_atom, avg_per_cpu; | ||
321 | |||
322 | avg_atom = p->se.sum_exec_runtime; | ||
323 | if (nr_switches) | ||
324 | do_div(avg_atom, nr_switches); | ||
325 | else | ||
326 | avg_atom = -1LL; | ||
327 | |||
328 | avg_per_cpu = p->se.sum_exec_runtime; | ||
329 | if (p->se.nr_migrations) | ||
330 | avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations); | ||
331 | else | ||
332 | avg_per_cpu = -1LL; | ||
333 | |||
334 | __PN(avg_atom); | ||
335 | __PN(avg_per_cpu); | ||
336 | } | ||
261 | #endif | 337 | #endif |
262 | SEQ_printf(m, "%-25s:%20Ld\n", | 338 | __P(nr_switches); |
263 | "nr_switches", (long long)(p->nvcsw + p->nivcsw)); | 339 | SEQ_printf(m, "%-35s:%21Ld\n", |
340 | "nr_voluntary_switches", (long long)p->nvcsw); | ||
341 | SEQ_printf(m, "%-35s:%21Ld\n", | ||
342 | "nr_involuntary_switches", (long long)p->nivcsw); | ||
343 | |||
264 | P(se.load.weight); | 344 | P(se.load.weight); |
265 | P(policy); | 345 | P(policy); |
266 | P(prio); | 346 | P(prio); |
347 | #undef PN | ||
348 | #undef __PN | ||
267 | #undef P | 349 | #undef P |
350 | #undef __P | ||
268 | 351 | ||
269 | { | 352 | { |
270 | u64 t0, t1; | 353 | u64 t0, t1; |
271 | 354 | ||
272 | t0 = sched_clock(); | 355 | t0 = sched_clock(); |
273 | t1 = sched_clock(); | 356 | t1 = sched_clock(); |
274 | SEQ_printf(m, "%-25s:%20Ld\n", | 357 | SEQ_printf(m, "%-35s:%21Ld\n", |
275 | "clock-delta", (long long)(t1-t0)); | 358 | "clock-delta", (long long)(t1-t0)); |
276 | } | 359 | } |
277 | } | 360 | } |
@@ -279,9 +362,32 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
279 | void proc_sched_set_task(struct task_struct *p) | 362 | void proc_sched_set_task(struct task_struct *p) |
280 | { | 363 | { |
281 | #ifdef CONFIG_SCHEDSTATS | 364 | #ifdef CONFIG_SCHEDSTATS |
282 | p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0; | 365 | p->se.wait_max = 0; |
283 | p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0; | 366 | p->se.sleep_max = 0; |
367 | p->se.sum_sleep_runtime = 0; | ||
368 | p->se.block_max = 0; | ||
369 | p->se.exec_max = 0; | ||
370 | p->se.slice_max = 0; | ||
371 | p->se.nr_migrations = 0; | ||
372 | p->se.nr_migrations_cold = 0; | ||
373 | p->se.nr_failed_migrations_affine = 0; | ||
374 | p->se.nr_failed_migrations_running = 0; | ||
375 | p->se.nr_failed_migrations_hot = 0; | ||
376 | p->se.nr_forced_migrations = 0; | ||
377 | p->se.nr_forced2_migrations = 0; | ||
378 | p->se.nr_wakeups = 0; | ||
379 | p->se.nr_wakeups_sync = 0; | ||
380 | p->se.nr_wakeups_migrate = 0; | ||
381 | p->se.nr_wakeups_local = 0; | ||
382 | p->se.nr_wakeups_remote = 0; | ||
383 | p->se.nr_wakeups_affine = 0; | ||
384 | p->se.nr_wakeups_affine_attempts = 0; | ||
385 | p->se.nr_wakeups_passive = 0; | ||
386 | p->se.nr_wakeups_idle = 0; | ||
387 | p->sched_info.bkl_count = 0; | ||
284 | #endif | 388 | #endif |
285 | p->se.sum_exec_runtime = 0; | 389 | p->se.sum_exec_runtime = 0; |
286 | p->se.prev_sum_exec_runtime = 0; | 390 | p->se.prev_sum_exec_runtime = 0; |
391 | p->nvcsw = 0; | ||
392 | p->nivcsw = 0; | ||
287 | } | 393 | } |