author		Mike Galbraith <efault@gmx.de>	2010-03-11 11:16:20 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-11 12:32:50 -0500
commit		a64692a3afd85fe048551ab89142fd5ca99a0dbd (patch)
tree		7d2800efb7fb9e3aa5c99ab883004932fdc362c6
parent		e12f31d3e5d36328c7fbd0fce40a95e70b59152c (diff)
sched: Cleanup/optimize clock updates
Now that we no longer depend on the clock being updated prior to enqueueing on
migratory wakeup, we can clean up a bit, placing calls to update_rq_clock()
exactly where they are needed, i.e. on enqueue, dequeue and schedule events.

In the case of a freshly enqueued task immediately preempting, we can skip the
update during preemption, as the clock was just updated by the enqueue event.
We also save an unneeded call during a migratory wakeup by not updating the
previous runqueue, where update_curr() won't be invoked.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301199.6785.32.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched.c		32
-rw-r--r--	kernel/sched_fair.c	 2
2 files changed, 16 insertions, 18 deletions
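For readers skimming the diff below, the hunks boil down to one small state machine around the new rq->skip_clock_update flag. The following is a condensed, illustrative sketch of that mechanism, using simplified standalone types and a hypothetical read_cpu_clock() helper standing in for sched_clock_cpu(); it is not the kernel code itself.

/*
 * Condensed sketch of the skip_clock_update mechanism this patch adds.
 * Simplified standalone types and names, not the real kernel structures.
 */
#include <stdio.h>

struct rq {
	unsigned long long clock;
	unsigned int skip_clock_update;	/* flag added by this patch */
};

/* Stand-in for sched_clock_cpu(): a monotonically growing counter. */
static unsigned long long read_cpu_clock(void)
{
	static unsigned long long fake_ns;
	return fake_ns += 1000;
}

/* Clock updates become conditional: a pending skip suppresses them. */
static void update_rq_clock(struct rq *rq)
{
	if (!rq->skip_clock_update)
		rq->clock = read_cpu_clock();
}

/* Enqueue (and dequeue) are now where the clock gets refreshed. */
static void enqueue_task(struct rq *rq)
{
	update_rq_clock(rq);
	/* ... class enqueue_task() would run here ... */
}

/*
 * A queue event that triggers an immediate preemption marks the rq so
 * the upcoming schedule() can skip a back-to-back clock update.
 */
static void check_preempt_curr(struct rq *rq, int need_resched)
{
	if (need_resched)
		rq->skip_clock_update = 1;
}

/* The skip is consumed, and the flag cleared, when prev is put back. */
static void put_prev_task(struct rq *rq, int prev_on_rq)
{
	if (prev_on_rq)
		update_rq_clock(rq);	/* no-op while the skip is pending */
	rq->skip_clock_update = 0;
}

int main(void)
{
	struct rq rq = { 0, 0 };

	enqueue_task(&rq);		/* clock advances to 1000 */
	check_preempt_curr(&rq, 1);	/* wakeup preempts: mark skip */
	put_prev_task(&rq, 1);		/* update skipped, flag cleared */
	printf("clock after schedule: %llu\n", rq.clock);	/* still 1000 */

	put_prev_task(&rq, 1);		/* normal path: clock advances */
	printf("clock after next schedule: %llu\n", rq.clock);	/* 2000 */
	return 0;
}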
diff --git a/kernel/sched.c b/kernel/sched.c
index 68ed6f4f3c13..16559de4edea 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -495,6 +495,8 @@ struct rq {
 	u64 nohz_stamp;
 	unsigned char in_nohz_recently;
 #endif
+	unsigned int skip_clock_update;
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -592,6 +594,13 @@ static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
 	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+	/*
+	 * A queue event has occurred, and we're going to schedule.  In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(p))
+		rq->skip_clock_update = 1;
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -626,7 +635,8 @@ static inline int cpu_of(struct rq *rq)
 
 inline void update_rq_clock(struct rq *rq)
 {
-	rq->clock = sched_clock_cpu(cpu_of(rq));
+	if (!rq->skip_clock_update)
+		rq->clock = sched_clock_cpu(cpu_of(rq));
 }
 
 /*
@@ -1782,8 +1792,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
-	update_rq_clock(rq1);
-	update_rq_clock(rq2);
 }
 
 /*
@@ -1880,6 +1888,7 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
+	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1887,6 +1896,7 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
+	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2366,7 +2376,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
 
@@ -2407,7 +2416,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	rq = cpu_rq(cpu);
 	raw_spin_lock(&rq->lock);
-	update_rq_clock(rq);
 
 	/*
 	 * We migrated the task without holding either rq->lock, however
@@ -2624,7 +2632,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	BUG_ON(p->state != TASK_WAKING);
 	p->state = TASK_RUNNING;
-	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -3578,6 +3585,9 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	if (prev->se.on_rq)
+		update_rq_clock(rq);
+	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
@@ -3640,7 +3650,6 @@ need_resched_nonpreemptible:
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
-	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -4197,7 +4206,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 
 	oldprio = p->prio;
 	prev_class = p->sched_class;
@@ -4240,7 +4248,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -4523,7 +4530,6 @@ recheck:
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
@@ -5530,7 +5536,6 @@ void sched_idle_next(void)
 
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5585,7 +5590,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 	for ( ; ; ) {
 		if (!rq->nr_running)
 			break;
-		update_rq_clock(rq);
 		next = pick_next_task(rq);
 		if (!next)
 			break;
@@ -5869,7 +5873,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		raw_spin_lock_irq(&rq->lock);
-		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
@@ -7815,7 +7818,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 {
 	int on_rq;
 
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -8177,8 +8179,6 @@ void sched_move_task(struct task_struct *tsk)
 
 	rq = task_rq_lock(tsk, &flags);
 
-	update_rq_clock(rq);
-
 	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c3b69d4b5d65..69e582020ff8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3064,8 +3064,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 
 	/* move a task from busiest_rq to target_rq */
 	double_lock_balance(busiest_rq, target_rq);
-	update_rq_clock(busiest_rq);
-	update_rq_clock(target_rq);
 
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {