 include/linux/sched.h |  9
 kernel/sched.c        | 38
 kernel/sched_debug.c  | 54
 kernel/sched_fair.c   | 74
 4 files changed, 14 insertions(+), 161 deletions(-)
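
In short: all wait_runtime/fair_clock bookkeeping is removed from CFS,
together with the feature bits and schedstats fields that serviced it,
leaving per-entity vruntime as the sole fairness metric. vruntime advances
inversely to load weight, so heavier tasks accumulate it more slowly and
hold the CPU longer before the rbtree picks the next leftmost entity. A
standalone model of that surviving rule (a sketch, not code from the patch;
NICE_0_LOAD and the nice -5 weight of 3121 follow the kernel's
prio_to_weight conventions):

	#include <stdio.h>

	#define NICE_0_LOAD 1024UL	/* weight of a nice-0 task */

	/* vruntime advances more slowly for heavier (higher-weight) tasks */
	static unsigned long long vruntime_delta(unsigned long delta_exec_ns,
						 unsigned long weight)
	{
		return (unsigned long long)delta_exec_ns * NICE_0_LOAD / weight;
	}

	int main(void)
	{
		/* 1 ms of CPU: nice 0 (weight 1024) vs nice -5 (weight 3121) */
		printf("nice  0: +%llu ns of vruntime\n", vruntime_delta(1000000, 1024));
		printf("nice -5: +%llu ns of vruntime\n", vruntime_delta(1000000, 3121));
		return 0;
	}
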
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 353630d6ae4b..572df1bbaeec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,13 +888,9 @@ struct load_weight {
  *     4 se->block_start
  *     4 se->run_node
  *     4 se->sleep_start
- *     4 se->sleep_start_fair
  *     6 se->load.weight
- *     7 se->delta_fair
- *    15 se->wait_runtime
  */
 struct sched_entity {
-	long			wait_runtime;
 	s64			fair_key;
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
@@ -904,12 +900,10 @@ struct sched_entity {
 	u64			sum_exec_runtime;
 	u64			vruntime;
 	u64			prev_sum_exec_runtime;
-	u64			wait_start_fair;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
-	s64			sum_wait_runtime;
 
 	u64			sleep_start;
 	u64			sleep_max;
@@ -919,9 +913,6 @@ struct sched_entity {
 	u64			block_max;
 	u64			exec_max;
 	u64			slice_max;
-
-	unsigned long		wait_runtime_overruns;
-	unsigned long		wait_runtime_underruns;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched.c b/kernel/sched.c
index 21cc3b2be023..0f0cf374c775 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,11 +176,8 @@ struct cfs_rq {
 	struct load_weight load;
 	unsigned long nr_running;
 
-	s64 fair_clock;
 	u64 exec_clock;
 	u64 min_vruntime;
-	s64 wait_runtime;
-	unsigned long wait_runtime_overruns, wait_runtime_underruns;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -389,20 +386,14 @@ static void update_rq_clock(struct rq *rq)
  * Debugging: various feature bits
  */
 enum {
-	SCHED_FEAT_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
-	SCHED_FEAT_SLEEPER_AVG		= 4,
-	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_USE_TREE_AVG		= 32,
-	SCHED_FEAT_APPROX_AVG		= 64,
+	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_START_DEBIT		= 2,
+	SCHED_FEAT_USE_TREE_AVG		= 4,
+	SCHED_FEAT_APPROX_AVG		= 8,
 };
 
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_FAIR_SLEEPERS	*0 |
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*0 |
-		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_USE_TREE_AVG		*0 |
 		SCHED_FEAT_APPROX_AVG		*0;
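
The surviving bits are consumed through the sched_feat() helper, which
kernel/sched.c defines (unchanged by this patch) as:

	#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

so the "*1 | ... *0" initializer above is just a readable per-bit default:
NEW_FAIR_SLEEPERS and START_DEBIT are on, USE_TREE_AVG and APPROX_AVG off.
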
@@ -716,15 +707,11 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	if (sched_feat(FAIR_SLEEPERS))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 /*
@@ -848,8 +835,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	p->se.wait_runtime = 0;
-
 	if (task_has_rt_policy(p)) {
 		p->se.load.weight = prio_to_weight[0] * 2;
 		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
@@ -995,13 +980,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
 	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
-	u64 clock_offset, fair_clock_offset;
+	u64 clock_offset;
 
 	clock_offset = old_rq->clock - new_rq->clock;
-	fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
-	if (p->se.wait_start_fair)
-		p->se.wait_start_fair -= fair_clock_offset;
 
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
@@ -1571,15 +1552,12 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
-	p->se.wait_start_fair		= 0;
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
-	p->se.wait_runtime		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
-	p->se.sum_wait_runtime		= 0;
 	p->se.sum_sleep_runtime		= 0;
 	p->se.sleep_start		= 0;
 	p->se.block_start		= 0;
@@ -1588,8 +1566,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_max			= 0;
 	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
-	p->se.wait_runtime_overruns	= 0;
-	p->se.wait_runtime_underruns	= 0;
 #endif
 
 	INIT_LIST_HEAD(&p->run_list);
@@ -6436,7 +6412,6 @@ int in_sched_functions(unsigned long addr)
 static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
-	cfs_rq->fair_clock = 1;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
@@ -6562,15 +6537,12 @@ void normalize_rt_tasks(void)
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
 		p->se.fair_key = 0;
-		p->se.wait_runtime = 0;
 		p->se.exec_start = 0;
-		p->se.wait_start_fair = 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start = 0;
 		p->se.sleep_start = 0;
 		p->se.block_start = 0;
 #endif
-		task_rq(p)->cfs.fair_clock = 0;
 		task_rq(p)->clock = 0;
 
 		if (!rt_task(p)) {
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 3350169a7d2a..e3b62324ac31 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -36,21 +36,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	else
 		SEQ_printf(m, " ");
 
-	SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
+	SEQ_printf(m, "%15s %5d %15Ld %13Ld %5d ",
 		p->comm, p->pid,
 		(long long)p->se.fair_key,
-		(long long)(p->se.fair_key - rq->cfs.fair_clock),
-		(long long)p->se.wait_runtime,
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld %15Ld\n",
+	SEQ_printf(m, "%15Ld %15Ld %15Ld\n",
 		(long long)p->se.vruntime,
 		(long long)p->se.sum_exec_runtime,
-		(long long)p->se.sum_wait_runtime,
-		(long long)p->se.sum_sleep_runtime,
-		(long long)p->se.wait_runtime_overruns,
-		(long long)p->se.wait_runtime_underruns);
+		(long long)p->se.sum_sleep_runtime);
 #else
 	SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
 		0LL, 0LL, 0LL, 0LL, 0LL);
@@ -63,10 +58,8 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 
 	SEQ_printf(m,
 	"\nrunnable tasks:\n"
-	"            task   PID       tree-key         delta       waiting"
-	"  switches  prio"
-	"        exec-runtime        sum-exec       sum-wait      sum-sleep"
-	"    wait-overrun  wait-underrun\n"
+	"            task   PID         tree-key  switches  prio"
+	"     exec-runtime        sum-exec      sum-sleep\n"
 	"------------------------------------------------------------------"
 	"--------------------------------"
 	"------------------------------------------------"
@@ -84,29 +77,6 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irq(&tasklist_lock);
 }
 
-static void
-print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-	s64 wait_runtime_rq_sum = 0;
-	struct task_struct *p;
-	struct rb_node *curr;
-	unsigned long flags;
-	struct rq *rq = &per_cpu(runqueues, cpu);
-
-	spin_lock_irqsave(&rq->lock, flags);
-	curr = first_fair(cfs_rq);
-	while (curr) {
-		p = rb_entry(curr, struct task_struct, se.run_node);
-		wait_runtime_rq_sum += p->se.wait_runtime;
-
-		curr = rb_next(curr);
-	}
-	spin_unlock_irqrestore(&rq->lock, flags);
-
-	SEQ_printf(m, "  .%-30s: %Ld\n", "wait_runtime_rq_sum",
-		(long long)wait_runtime_rq_sum);
-}
-
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -120,7 +90,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
 
-	P(fair_clock);
 	P(exec_clock);
 
 	spin_lock_irqsave(&rq->lock, flags);
@@ -144,13 +113,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	spread0 = min_vruntime - rq0_min_vruntime;
 	SEQ_printf(m, "  .%-30s: %Ld\n", "spread0",
 			(long long)spread0);
-
-	P(wait_runtime);
-	P(wait_runtime_overruns);
-	P(wait_runtime_underruns);
 #undef P
-
-	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
@@ -268,8 +231,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #define P(F) \
 	SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
 
-	P(se.wait_runtime);
-	P(se.wait_start_fair);
 	P(se.exec_start);
 	P(se.vruntime);
 	P(se.sum_exec_runtime);
@@ -283,9 +244,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.exec_max);
 	P(se.slice_max);
 	P(se.wait_max);
-	P(se.wait_runtime_overruns);
-	P(se.wait_runtime_underruns);
-	P(se.sum_wait_runtime);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		"nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -312,8 +270,6 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.exec_max = 0;
 	p->se.slice_max = 0;
 	p->se.wait_max = 0;
-	p->se.wait_runtime_overruns = 0;
-	p->se.wait_runtime_underruns = 0;
 #endif
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a94189c42d1a..2df5a6467812 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -178,8 +178,6 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
-
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static void
@@ -192,8 +190,6 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
-
-	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -249,13 +245,6 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
-static void
-add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
-{
-	se->wait_runtime += delta;
-	schedstat_add(cfs_rq, wait_runtime, delta);
-}
-
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -264,9 +253,7 @@ static inline void
 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
-	unsigned long delta_fair, delta_mine, delta_exec_weighted;
-	struct load_weight *lw = &cfs_rq->load;
-	unsigned long load = lw->weight;
+	unsigned long delta_exec_weighted;
 
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -278,25 +265,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 			&curr->load);
 	}
 	curr->vruntime += delta_exec_weighted;
-
-	if (!sched_feat(FAIR_SLEEPERS))
-		return;
-
-	if (unlikely(!load))
-		return;
-
-	delta_fair = calc_delta_fair(delta_exec, lw);
-	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
-
-	cfs_rq->fair_clock += delta_fair;
-	/*
-	 * We executed delta_exec amount of time on the CPU,
-	 * but we were only entitled to delta_mine amount of
-	 * time during that period (if nr_running == 1 then
-	 * the two values are equal)
-	 * [Note: delta_mine - delta_exec is negative]:
-	 */
-	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
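
The block deleted above was the core of the old credit scheme, as its
comment spelled out: the runner was entitled to delta_mine, its
weight-proportional share of delta_exec, and banked the (always negative)
difference into wait_runtime, while waiters earned credit back in
update_stats_wait_end(). One update step of that scheme for two
equal-weight tasks (a sketch, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		long delta_exec = 1000000;	/* ns the current task ran */
		long nr_running = 2;		/* equal-weight tasks on the rq */
		long delta_mine = delta_exec / nr_running; /* entitled share */

		/* delta_mine - delta_exec is negative: the runner's
		 * wait_runtime drained while it occupied the CPU */
		printf("runner's wait_runtime delta: %ld ns\n",
		       delta_mine - delta_exec);
		return 0;
	}
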
@@ -322,7 +290,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	se->wait_start_fair = cfs_rq->fair_clock;
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
@@ -354,35 +321,11 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->fair_key = se->vruntime;
 }
 
-/*
- * Note: must be called with a freshly updated rq->fair_clock.
- */
-static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
-			unsigned long delta_fair)
-{
-	schedstat_set(se->wait_max, max(se->wait_max,
-			rq_of(cfs_rq)->clock - se->wait_start));
-
-	delta_fair = calc_weighted(delta_fair, se);
-
-	add_wait_runtime(cfs_rq, se, delta_fair);
-}
-
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long delta_fair;
-
-	if (unlikely(!se->wait_start_fair))
-		return;
-
-	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
-			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
-
-	__update_stats_wait_end(cfs_rq, se, delta_fair);
-
-	se->wait_start_fair = 0;
+	schedstat_set(se->wait_max, max(se->wait_max,
+			rq_of(cfs_rq)->clock - se->wait_start));
 	schedstat_set(se->wait_start, 0);
 }
 
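
The surviving wait_max bookkeeping is pure schedstats, measured against the
real rq clock rather than the removed fair_clock. It costs nothing on
!CONFIG_SCHEDSTATS builds because schedstat_set() compiles away; its
definition in the kernel's scheduler stats header is essentially:

	#ifdef CONFIG_SCHEDSTATS
	# define schedstat_set(var, val)	do { var = (val); } while (0)
	#else
	# define schedstat_set(var, val)	do { } while (0)
	#endif
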
@@ -552,9 +495,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * Any task has to be enqueued before it get to execute on
 	 * a CPU. So account for the time it spent waiting on the
-	 * runqueue. (note, here we rely on pick_next_task() having
-	 * done a put_prev_task_fair() shortly before this, which
-	 * updated rq->fair_clock - used by update_stats_wait_end())
+	 * runqueue.
 	 */
 	update_stats_wait_end(cfs_rq, se);
 	update_stats_curr_start(cfs_rq, se);
@@ -989,13 +930,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	update_curr(cfs_rq);
 	place_entity(cfs_rq, se, 1);
 
-	/*
-	 * The statistical average of wait_runtime is about
-	 * -granularity/2, so initialize the task with that:
-	 */
-	if (sched_feat(START_DEBIT))
-		se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);
-
 	if (sysctl_sched_child_runs_first &&
 	    curr->vruntime < se->vruntime) {
 
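
START_DEBIT itself survives; only its wait_runtime implementation dies
here. In the vruntime world the debit is applied by place_entity(cfs_rq,
se, 1) a few lines above, which starts a new task past min_vruntime by
roughly one slice so it cannot immediately preempt. A self-contained
paraphrase of that path (the names and signature are illustrative, not the
kernel's):

	/* model of the new-task placement rule; illustrative names only */
	static unsigned long long
	place_new_task(unsigned long long min_vruntime,
		       unsigned long long vslice, int start_debit)
	{
		unsigned long long vruntime = min_vruntime;

		if (start_debit)
			vruntime += vslice;	/* charge one slice up front */
		return vruntime;
	}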