author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
commit		bbdba7c0e1161934ae881ad00e4db49830f5ef59 (patch)
tree		1c5c5e9c9c0c6d6cb72b843121e7a38f2768356a /kernel/sched.c
parent		e22f5bbf86d8cce710d5c8ba5bf57832e73aab8c (diff)
sched: remove wait_runtime fields and features

Remove the wait_runtime-based fields and features, now that the CFS
math has been changed over to the vruntime metric.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
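Background, as a minimal standalone sketch (not kernel code): under the
vruntime metric there is no signed wait_runtime credit that is earned
while waiting and spent while running; each entity instead carries a
monotonic vruntime that advances while it runs, scaled by the inverse of
its load weight. The struct and helper below are hypothetical -- the
real fields live in struct sched_entity and the scaling goes through
calc_delta_fair() -- but NICE_0_LOAD mirrors the kernel's nice-0 weight
of 1024.

#include <stdint.h>

#define NICE_0_LOAD	1024	/* weight of a nice-0 task */

/* Illustrative stand-in for the vruntime part of struct sched_entity. */
struct entity_sketch {
	uint64_t	vruntime;	/* virtual runtime, only ever grows */
	unsigned long	load_weight;	/* from prio_to_weight[] */
};

/*
 * Charge delta_exec nanoseconds of real execution: heavier (higher
 * weight) entities accrue vruntime more slowly, so the leftmost entity
 * in the rbtree timeline is always the one with the least weighted CPU
 * time -- no per-entity wait_runtime credit is needed.
 */
static void charge_exec(struct entity_sketch *se, uint64_t delta_exec)
{
	se->vruntime += delta_exec * NICE_0_LOAD / se->load_weight;
}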
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 38 +++++---------------------------------
1 file changed, 5 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 21cc3b2be023..0f0cf374c775 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,11 +176,8 @@ struct cfs_rq {
 	struct load_weight load;
 	unsigned long nr_running;
 
-	s64 fair_clock;
 	u64 exec_clock;
 	u64 min_vruntime;
-	s64 wait_runtime;
-	unsigned long wait_runtime_overruns, wait_runtime_underruns;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -389,20 +386,14 @@ static void update_rq_clock(struct rq *rq)
  * Debugging: various feature bits
  */
 enum {
-	SCHED_FEAT_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
-	SCHED_FEAT_SLEEPER_AVG		= 4,
-	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_USE_TREE_AVG		= 32,
-	SCHED_FEAT_APPROX_AVG		= 64,
+	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_START_DEBIT		= 2,
+	SCHED_FEAT_USE_TREE_AVG		= 4,
+	SCHED_FEAT_APPROX_AVG		= 8,
 };
 
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_FAIR_SLEEPERS	*0 |
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*0 |
-		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_USE_TREE_AVG		*0 |
 		SCHED_FEAT_APPROX_AVG		*0;
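The *0 / *1 multipliers above set the compile-time default of the
feature bitmask: each multiplier switches one power-of-two bit on or
off while keeping one line per feature. A self-contained sketch of how
such a mask is consumed, assuming the kernel's sched_feat()
token-pasting idiom (place_new_entity() is a hypothetical caller, not a
kernel function):

enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_START_DEBIT		= 2,
};

static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_START_DEBIT		*1;

/* token pasting turns sched_feat(START_DEBIT) into a mask test */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

static void place_new_entity(void)	/* hypothetical caller */
{
	if (sched_feat(START_DEBIT))
		;	/* ... debit the new task's first slice ... */
}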
@@ -716,15 +707,11 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	if (sched_feat(FAIR_SLEEPERS))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 /*
@@ -848,8 +835,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	p->se.wait_runtime = 0;
-
 	if (task_has_rt_policy(p)) {
 		p->se.load.weight = prio_to_weight[0] * 2;
 		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
@@ -995,13 +980,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
 	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
-	u64 clock_offset, fair_clock_offset;
+	u64 clock_offset;
 
 	clock_offset = old_rq->clock - new_rq->clock;
-	fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
-	if (p->se.wait_start_fair)
-		p->se.wait_start_fair -= fair_clock_offset;
 
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
@@ -1571,15 +1552,12 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
-	p->se.wait_start_fair		= 0;
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
-	p->se.wait_runtime		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
-	p->se.sum_wait_runtime		= 0;
 	p->se.sum_sleep_runtime		= 0;
 	p->se.sleep_start		= 0;
 	p->se.block_start		= 0;
@@ -1588,8 +1566,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_max			= 0;
 	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
-	p->se.wait_runtime_overruns	= 0;
-	p->se.wait_runtime_underruns	= 0;
 #endif
 
 	INIT_LIST_HEAD(&p->run_list);
@@ -6436,7 +6412,6 @@ int in_sched_functions(unsigned long addr)
 static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
-	cfs_rq->fair_clock = 1;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
@@ -6562,15 +6537,12 @@ void normalize_rt_tasks(void)
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
 		p->se.fair_key			= 0;
-		p->se.wait_runtime		= 0;
 		p->se.exec_start		= 0;
-		p->se.wait_start_fair		= 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start		= 0;
 		p->se.sleep_start		= 0;
 		p->se.block_start		= 0;
 #endif
-		task_rq(p)->cfs.fair_clock	= 0;
 		task_rq(p)->clock		= 0;
 
 		if (!rt_task(p)) {