author     Dmitry Adamushko <dmitry.adamushko@gmail.com>  2007-10-15 11:00:07 -0400
committer  Ingo Molnar <mingo@elte.hu>                    2007-10-15 11:00:07 -0400
commit     30cfdcfc5f180fc21a3dad6ae3b7b2a9ee112186 (patch)
tree       7178cde2a1d2b924cc0b66031f4eb02482fe54c2
parent     7074badbcb4212d404a243e5c50efeb778ec3fc6 (diff)
sched: do not keep current in the tree and get rid of sched_entity::fair_key
Get rid of 'sched_entity::fair_key'.
As a side effect, 'current' is no longer kept within the tree for
SCHED_NORMAL/BATCH tasks. This simplifies some parts of the code
(e.g. entity_tick() and yield_task_fair()) and also somewhat optimizes
them (e.g. a single update_curr() now vs. a dequeue/enqueue pair
before in entity_tick()).
Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
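[ Illustration, not part of the patch: a minimal, runnable userspace
  sketch of the invariant this change introduces. A sorted array stands
  in for the cfs_rq rbtree; it holds only *waiting* entities. pick_next()
  removes the leftmost entity before it runs, and put_prev() re-inserts
  it, at which point its updated vruntime naturally becomes its sort key,
  which is why a separate fair_key is no longer needed. All names and
  types below are simplified stand-ins for the kernel's. ]

#include <stdio.h>

struct entity { long vruntime; };

struct rq {
	struct entity *tree[8];	/* stands in for cfs_rq->tasks_timeline */
	int nr_queued;
	struct entity *curr;	/* the running entity, kept OUT of the tree */
};

/* Insert sorted by vruntime (the rbtree insertion in the kernel). */
static void enqueue(struct rq *rq, struct entity *se)
{
	int i = rq->nr_queued++;

	while (i > 0 && rq->tree[i - 1]->vruntime > se->vruntime) {
		rq->tree[i] = rq->tree[i - 1];
		i--;
	}
	rq->tree[i] = se;
}

/* Pick the leftmost entity and take it out of the tree. */
static struct entity *pick_next(struct rq *rq)
{
	struct entity *se = rq->nr_queued ? rq->tree[0] : NULL;
	int i;

	if (se) {
		for (i = 1; i < rq->nr_queued; i++)
			rq->tree[i - 1] = rq->tree[i];
		rq->nr_queued--;
	}
	rq->curr = se;
	return se;
}

/* Put 'current' back into the tree, re-keyed by its new vruntime. */
static void put_prev(struct rq *rq, struct entity *prev)
{
	if (prev)
		enqueue(rq, prev);
	rq->curr = NULL;
}

int main(void)
{
	struct rq rq = { .nr_queued = 0, .curr = NULL };
	struct entity a = { 10 }, b = { 20 }, c = { 30 };
	struct entity *se;

	enqueue(&rq, &a);
	enqueue(&rq, &b);
	enqueue(&rq, &c);

	se = pick_next(&rq);	/* a (vruntime 10) runs; b and c wait */
	printf("running: %ld, waiting: %d\n", se->vruntime, rq.nr_queued);

	se->vruntime += 25;	/* a accrues virtual runtime while running */
	put_prev(&rq, se);	/* a (now 35) sorts after b (20) and c (30) */

	se = pick_next(&rq);	/* b (vruntime 20) is now leftmost */
	printf("next: %ld\n", se->vruntime);
	return 0;
}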
-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/sched.c        |  1
-rw-r--r--  kernel/sched_debug.c  |  2
-rw-r--r--  kernel/sched_fair.c   | 54
4 files changed, 36 insertions(+), 22 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 572df1bbaeec..f776a30b403e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -891,7 +891,6 @@ struct load_weight {
  * 6 se->load.weight
  */
 struct sched_entity {
-	s64			fair_key;
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
diff --git a/kernel/sched.c b/kernel/sched.c
index 6d1892192e21..3b104635a8ea 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6610,7 +6610,6 @@ void normalize_rt_tasks(void)
 
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
-		p->se.fair_key		= 0;
 		p->se.exec_start	= 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start	= 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index e3b62324ac31..bb34b8188f61 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -38,7 +38,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 
 	SEQ_printf(m, "%15s %5d %15Ld %13Ld %5d ",
 		p->comm, p->pid,
-		(long long)p->se.fair_key,
+		(long long)p->se.vruntime,
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
 #ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 144f3ef97380..b9e426a8a09f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -139,7 +139,7 @@ set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
 static inline s64
 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return se->fair_key - cfs_rq->min_vruntime;
+	return se->vruntime - cfs_rq->min_vruntime;
 }
 
 /*
@@ -181,9 +181,6 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
-	update_load_add(&cfs_rq->load, se->load.weight);
-	cfs_rq->nr_running++;
-	se->on_rq = 1;
 }
 
 static void
@@ -193,9 +190,6 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	set_leftmost(cfs_rq, rb_next(&se->run_node));
 
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
-	update_load_sub(&cfs_rq->load, se->load.weight);
-	cfs_rq->nr_running--;
-	se->on_rq = 0;
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -341,10 +335,6 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 */
 	if (se != cfs_rq->curr)
 		update_stats_wait_start(cfs_rq, se);
-	/*
-	 * Update the key:
-	 */
-	se->fair_key = se->vruntime;
 }
 
 static void
@@ -392,6 +382,22 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
+static void
+account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	update_load_add(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running++;
+	se->on_rq = 1;
+}
+
+static void
+account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	update_load_sub(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running--;
+	se->on_rq = 0;
+}
+
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
@@ -479,7 +485,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
-	__enqueue_entity(cfs_rq, se);
+	if (se != cfs_rq->curr)
+		__enqueue_entity(cfs_rq, se);
+	account_entity_enqueue(cfs_rq, se);
 }
 
 static void
@@ -498,7 +506,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 		}
 	}
 #endif
-	__dequeue_entity(cfs_rq, se);
+	if (se != cfs_rq->curr)
+		__dequeue_entity(cfs_rq, se);
+	account_entity_dequeue(cfs_rq, se);
 }
 
 /*
@@ -544,6 +554,10 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
 
+	/* 'current' is not kept within the tree. */
+	if (se)
+		__dequeue_entity(cfs_rq, se);
+
 	set_next_entity(cfs_rq, se);
 
 	return se;
@@ -560,19 +574,20 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 	update_stats_curr_end(cfs_rq, prev);
 
-	if (prev->on_rq)
+	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
+		/* Put 'current' back into the tree. */
+		__enqueue_entity(cfs_rq, prev);
+	}
 	cfs_rq->curr = NULL;
 }
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	/*
-	 * Dequeue and enqueue the task to update its
-	 * position within the tree:
+	 * Update run-time statistics of the 'current'.
 	 */
-	dequeue_entity(cfs_rq, curr, 0);
-	enqueue_entity(cfs_rq, curr, 0);
+	update_curr(cfs_rq);
 
 	if (cfs_rq->nr_running > 1)
 		check_preempt_tick(cfs_rq, curr);
@@ -749,7 +764,7 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 	/*
 	 * Minimally necessary key value to be last in the tree:
 	 */
-	se->fair_key = rightmost->fair_key + 1;
+	se->vruntime = rightmost->vruntime + 1;
 
 	if (cfs_rq->rb_leftmost == &se->run_node)
 		cfs_rq->rb_leftmost = rb_next(&se->run_node);
@@ -965,6 +980,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	update_stats_enqueue(cfs_rq, se);
 	__enqueue_entity(cfs_rq, se);
+	account_entity_enqueue(cfs_rq, se);
 	resched_task(rq->curr);
 }
 