From b910472dd3b7c1d51af9a594a759f642520c33e1 Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:38:55 -0800
Subject: [PATCH] sched: implement nice support across physical cpus on SMP

This patch implements 'nice' support across physical cpus on SMP.

It introduces an extra runqueue variable prio_bias which is the sum of the
(inverted) static priorities of all the tasks on the runqueue.

This is then used to bias busy rebalancing between runqueues to obtain good
distribution of tasks of different nice values.  By biasing the balancing
only during busy rebalancing we can avoid having any significant loss of
throughput by not affecting the carefully tuned idle balancing already in
place.  If all tasks are running at the same nice level this code should
also have minimal effect.  The code is optimised out in the !CONFIG_SMP
case.

Signed-off-by: Con Kolivas
Cc: Ingo Molnar
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 83 insertions(+), 18 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 3ce26954be12..c6827f94e156 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -206,6 +206,7 @@ struct runqueue {
 	 */
 	unsigned long nr_running;
 #ifdef CONFIG_SMP
+	unsigned long prio_bias;
 	unsigned long cpu_load[3];
 #endif
 	unsigned long long nr_switches;
@@ -659,13 +660,45 @@ static int effective_prio(task_t *p)
 	return prio;
 }
 
+#ifdef CONFIG_SMP
+static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+{
+	rq->prio_bias += MAX_PRIO - static_prio;
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+{
+	rq->prio_bias -= MAX_PRIO - static_prio;
+}
+#else
+static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+{
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+{
+}
+#endif
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	dec_prio_bias(rq, p->static_prio);
+}
+
 /*
  * __activate_task - move a task to the runqueue.
  */
 static inline void __activate_task(task_t *p, runqueue_t *rq)
 {
 	enqueue_task(p, rq->active);
-	rq->nr_running++;
+	inc_nr_running(p, rq);
 }
 
 /*
@@ -674,7 +707,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 {
 	enqueue_task_head(p, rq->active);
-	rq->nr_running++;
+	inc_nr_running(p, rq);
 }
 
 static int recalc_task_prio(task_t *p, unsigned long long now)
@@ -793,7 +826,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
  */
 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
-	rq->nr_running--;
+	dec_nr_running(p, rq);
 	dequeue_task(p, p->array);
 	p->array = NULL;
 }
@@ -930,27 +963,54 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+	unsigned long cpu_load = rq->cpu_load[type-1],
+		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
+	if (idle == NOT_IDLE) {
+		/*
+		 * If we are balancing busy runqueues the load is biased by
+		 * priority to create 'nice' support across cpus.
+		 */
+		cpu_load *= rq->prio_bias;
+		load_now *= rq->prio_bias;
+	}
+
 	if (type == 0)
 		return load_now;
 
-	return min(rq->cpu_load[type-1], load_now);
+	return min(cpu_load, load_now);
+}
+
+static inline unsigned long source_load(int cpu, int type)
+{
+	return __source_load(cpu, type, NOT_IDLE);
 }
 
 /*
  * Return a high guess at the load of a migration-target cpu
  */
-static inline unsigned long target_load(int cpu, int type)
+static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+	unsigned long cpu_load = rq->cpu_load[type-1],
+		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
 	if (type == 0)
 		return load_now;
 
-	return max(rq->cpu_load[type-1], load_now);
+	if (idle == NOT_IDLE) {
+		cpu_load *= rq->prio_bias;
+		load_now *= rq->prio_bias;
+	}
+	return max(cpu_load, load_now);
+}
+
+static inline unsigned long target_load(int cpu, int type)
+{
+	return __target_load(cpu, type, NOT_IDLE);
 }
 
 /*
@@ -1411,7 +1471,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 			list_add_tail(&p->run_list, &current->run_list);
 			p->array = current->array;
 			p->array->nr_active++;
-			rq->nr_running++;
+			inc_nr_running(p, rq);
 		}
 		set_need_resched();
 	} else
@@ -1756,9 +1816,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
 	dequeue_task(p, src_array);
-	src_rq->nr_running--;
+	dec_nr_running(p, src_rq);
 	set_task_cpu(p, this_cpu);
-	this_rq->nr_running++;
+	inc_nr_running(p, this_rq);
 	enqueue_task(p, this_array);
 	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
 				+ this_rq->timestamp_last_tick;
@@ -1937,9 +1997,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
-			load = target_load(i, load_idx);
+			load = __target_load(i, load_idx, idle);
 		else
-			load = source_load(i, load_idx);
+			load = __source_load(i, load_idx, idle);
 
 		avg_load += load;
 	}
@@ -2044,14 +2104,15 @@ out_balanced:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group)
+static runqueue_t *find_busiest_queue(struct sched_group *group,
+	enum idle_type idle)
 {
 	unsigned long load, max_load = 0;
 	runqueue_t *busiest = NULL;
 	int i;
 
 	for_each_cpu_mask(i, group->cpumask) {
-		load = source_load(i, 0);
+		load = __source_load(i, 0, idle);
 
 		if (load > max_load) {
 			max_load = load;
@@ -2095,7 +2156,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group);
+	busiest = find_busiest_queue(group, idle);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -2218,7 +2279,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group);
+	busiest = find_busiest_queue(group, NEWLY_IDLE);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
@@ -3447,7 +3508,9 @@ void set_user_nice(task_t *p, long nice)
 	 * not SCHED_NORMAL:
 	 */
 	if (rt_task(p)) {
+		dec_prio_bias(rq, p->static_prio);
 		p->static_prio = NICE_TO_PRIO(nice);
+		inc_prio_bias(rq, p->static_prio);
 		goto out_unlock;
 	}
 	array = p->array;
@@ -3457,7 +3520,9 @@ void set_user_nice(task_t *p, long nice)
 	old_prio = p->prio;
 	new_prio = NICE_TO_PRIO(nice);
 	delta = new_prio - old_prio;
+	dec_prio_bias(rq, p->static_prio);
 	p->static_prio = NICE_TO_PRIO(nice);
+	inc_prio_bias(rq, p->static_prio);
 	p->prio += delta;
 
 	if (array) {
--
cgit v1.2.2


From 738a2ccbcf8c2c1b039f1e76662dce60b22b694b Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:38:56 -0800
Subject: [PATCH] sched: change prio bias only if queued

prio_bias should only be adjusted in set_user_nice if p is actually
currently queued.

Signed-off-by: Con Kolivas
Cc: Ingo Molnar
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index c6827f94e156..e1f57bd5aaa6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3508,25 +3508,24 @@ void set_user_nice(task_t *p, long nice)
 	 * not SCHED_NORMAL:
 	 */
 	if (rt_task(p)) {
-		dec_prio_bias(rq, p->static_prio);
 		p->static_prio = NICE_TO_PRIO(nice);
-		inc_prio_bias(rq, p->static_prio);
 		goto out_unlock;
 	}
 	array = p->array;
-	if (array)
+	if (array) {
 		dequeue_task(p, array);
+		dec_prio_bias(rq, p->static_prio);
+	}
 
 	old_prio = p->prio;
 	new_prio = NICE_TO_PRIO(nice);
 	delta = new_prio - old_prio;
-	dec_prio_bias(rq, p->static_prio);
 	p->static_prio = NICE_TO_PRIO(nice);
-	inc_prio_bias(rq, p->static_prio);
 	p->prio += delta;
 
 	if (array) {
 		enqueue_task(p, array);
+		inc_prio_bias(rq, p->static_prio);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
--
cgit v1.2.2


From dad1c65c8000f4485d8602e1875ded77e0d72133 Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:38:57 -0800
Subject: [PATCH] sched: account rt tasks in prio_bias()

Real time tasks' effect on prio_bias should be based on their real time
priority level instead of their static_prio which is based on nice.
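
For reference, a minimal user-space sketch of the bias arithmetic this change
moves rt tasks onto (standalone and illustrative, not code from the patch;
MAX_PRIO, MAX_RT_PRIO and NICE_TO_PRIO are assumed to have their values from
kernels of this era):

	#include <stdio.h>

	#define MAX_RT_PRIO		100
	#define MAX_PRIO		(MAX_RT_PRIO + 40)
	#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)

	/* per-task contribution, mirroring inc_prio_bias()/dec_prio_bias() */
	static unsigned long bias_of(int prio)
	{
		return MAX_PRIO - prio;
	}

	int main(void)
	{
		/* SCHED_NORMAL tasks counted by static_prio: nice -20..19 -> 40..1 */
		printf("nice -20: %lu\n", bias_of(NICE_TO_PRIO(-20)));
		printf("nice   0: %lu\n", bias_of(NICE_TO_PRIO(0)));
		printf("nice  19: %lu\n", bias_of(NICE_TO_PRIO(19)));
		/* rt tasks counted by p->prio: prio 0..99 -> 140..41 */
		printf("rt prio 1: %lu\n", bias_of(1));
		return 0;
	}

With these values a nice 19 task contributes 1 to prio_bias and a nice -20
task contributes 40, while an rt task at p->prio 1 contributes 139, which is
why rt tasks are counted by their real time priority rather than by a
static_prio that is meaningless for them.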
Signed-off-by: Con Kolivas
Cc: Ingo Molnar
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 22 ++++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index e1f57bd5aaa6..d9dbf8ee6ca4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -661,21 +661,21 @@ static int effective_prio(task_t *p)
 }
 
 #ifdef CONFIG_SMP
-static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
-	rq->prio_bias += MAX_PRIO - static_prio;
+	rq->prio_bias += MAX_PRIO - prio;
 }
 
-static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
-	rq->prio_bias -= MAX_PRIO - static_prio;
+	rq->prio_bias -= MAX_PRIO - prio;
 }
 #else
-static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
 }
 
-static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
 #endif
@@ -683,13 +683,19 @@ static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	inc_prio_bias(rq, p->static_prio);
+	if (rt_task(p))
+		inc_prio_bias(rq, p->prio);
+	else
+		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	dec_prio_bias(rq, p->static_prio);
+	if (rt_task(p))
+		dec_prio_bias(rq, p->prio);
+	else
+		dec_prio_bias(rq, p->static_prio);
 }
 
 /*
--
cgit v1.2.2


From 3b0bd9bc6f3b8a47853d1b1de4520de3878e8941 Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:38:58 -0800
Subject: [PATCH] sched: smp nice bias busy queues on idle rebalance

To intensify the 'nice' support across physical cpus on SMP we can bias the
loads on idle rebalancing.  To prevent idle rebalance from trying to pull
tasks from queues that appear heavily loaded we only bias the load if there
is more than one task running.

Add some minor micro-optimisations and have only one return from
__source_load and __target_load functions.

Fix the fact that target_load was not biased by priority when type == 0.

Signed-off-by: Con Kolivas
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index d9dbf8ee6ca4..ec9ea9119b98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (idle == NOT_IDLE || rq->nr_running > 1)
 		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing the load is biased by
+		 * priority to create 'nice' support across cpus. When
When + * idle rebalancing we should only bias the source_load if + * there is more than one task running on that queue to + * prevent idle rebalance from trying to pull tasks from a + * queue with only one running task. */ - cpu_load *= rq->prio_bias; - load_now *= rq->prio_bias; - } + source_load *= rq->prio_bias; - if (type == 0) - return load_now; - - return min(cpu_load, load_now); + return source_load; } static inline unsigned long source_load(int cpu, int type) @@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type) static inline unsigned long __target_load(int cpu, int type, enum idle_type idle) { runqueue_t *rq = cpu_rq(cpu); - unsigned long cpu_load = rq->cpu_load[type-1], + unsigned long target_load, cpu_load = rq->cpu_load[type-1], load_now = rq->nr_running * SCHED_LOAD_SCALE; if (type == 0) - return load_now; + target_load = load_now; + else + target_load = max(cpu_load, load_now); - if (idle == NOT_IDLE) { - cpu_load *= rq->prio_bias; - load_now *= rq->prio_bias; - } - return max(cpu_load, load_now); + if (idle == NOT_IDLE || rq->nr_running > 1) + target_load *= rq->prio_bias; + + return target_load; } static inline unsigned long target_load(int cpu, int type) -- cgit v1.2.2 From 6dd4a85bb3ee0715415892c8b0f2a9bd08d31ca4 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Tue, 8 Nov 2005 21:38:59 -0800 Subject: [PATCH] sched: correct smp_nice_bias The priority biasing was off by mutliplying the total load by the total priority bias and this ruins the ratio of loads between runqueues. This patch should correct the ratios of loads between runqueues to be proportional to overall load. -2nd attempt. From: Dave Kleikamp This patch fixes a divide-by-zero error that I hit on a two-way i386 machine. rq->nr_running is tested to be non-zero, but may change by the time it is used in the division. Saving the value to a local variable ensures that the same value that is checked is used in the division. Signed-off-by: Con Kolivas Signed-off-by: Dave Kleikamp Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'kernel/sched.c') diff --git a/kernel/sched.c b/kernel/sched.c index ec9ea9119b98..502d47c883b6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -972,15 +972,16 @@ void kick_process(task_t *p) static inline unsigned long __source_load(int cpu, int type, enum idle_type idle) { runqueue_t *rq = cpu_rq(cpu); + unsigned long running = rq->nr_running; unsigned long source_load, cpu_load = rq->cpu_load[type-1], - load_now = rq->nr_running * SCHED_LOAD_SCALE; + load_now = running * SCHED_LOAD_SCALE; if (type == 0) source_load = load_now; else source_load = min(cpu_load, load_now); - if (idle == NOT_IDLE || rq->nr_running > 1) + if (running > 1 || (idle == NOT_IDLE && running)) /* * If we are busy rebalancing the load is biased by * priority to create 'nice' support across cpus. When @@ -989,7 +990,7 @@ static inline unsigned long __source_load(int cpu, int type, enum idle_type idle * prevent idle rebalance from trying to pull tasks from a * queue with only one running task. 
 		 */
-	source_load *= rq->prio_bias;
+	source_load = source_load * rq->prio_bias / running;
 
 	return source_load;
 }
@@ -1005,16 +1006,17 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		target_load = load_now;
 	else
 		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
-		target_load *= rq->prio_bias;
+	if (running > 1 || (idle == NOT_IDLE && running))
+		target_load = target_load * rq->prio_bias / running;
 
 	return target_load;
 }
--
cgit v1.2.2


From ede3d0fba99520f268067917b50858d788bc41da Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:39:00 -0800
Subject: [PATCH] sched: consider migration thread with smp nice

The intermittent scheduling of the migration thread at ultra high priority
makes the smp nice handling see that runqueue as being heavily loaded.  The
migration thread itself actually handles the balancing so its influence on
priority balancing should be ignored.

Signed-off-by: Con Kolivas
Cc: Ingo Molnar
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 502d47c883b6..0f2def822296 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -670,6 +670,31 @@ static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 	rq->prio_bias -= MAX_PRIO - prio;
 }
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			/*
+			 * The migration thread does the actual balancing. Do
+			 * not bias by its priority as the ultra high priority
+			 * will skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
+		inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			dec_prio_bias(rq, p->prio);
+	} else
+		dec_prio_bias(rq, p->static_prio);
+}
 #else
 static inline void inc_prio_bias(runqueue_t *rq, int prio)
 {
@@ -678,25 +703,17 @@ static inline void inc_prio_bias(runqueue_t *rq, int prio)
 
 static inline void dec_prio_bias(runqueue_t *rq, int prio)
 {
 }
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running++;
-	if (rt_task(p))
-		inc_prio_bias(rq, p->prio);
-	else
-		inc_prio_bias(rq, p->static_prio);
 }
 
 static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
-	if (rt_task(p))
-		dec_prio_bias(rq, p->prio);
-	else
-		dec_prio_bias(rq, p->static_prio);
 }
+#endif
 
 /*
  * __activate_task - move a task to the runqueue.
--
cgit v1.2.2


From 64c7c8f88559624abdbe12b5da6502e8879f8d28 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Tue, 8 Nov 2005 21:39:04 -0800
Subject: [PATCH] sched: resched and cpu_idle rework

Make some changes to the NEED_RESCHED and POLLING_NRFLAG handling to reduce
confusion, and make their semantics rigid.  Improves efficiency of
resched_task and some cpu_idle routines.

* In resched_task:
- TIF_NEED_RESCHED is only cleared with the task's runqueue lock held,
  and as we hold it during resched_task, then there is no need for an
  atomic test and set there.
  The only other time this should be set is when the task's quantum
  expires, in the timer interrupt - this is protected against because
  the rq lock is irq-safe.
- If TIF_NEED_RESCHED is set, then we don't need to do anything.  It
  won't get unset until the task gets schedule()d off.
- If we are running on the same CPU as the task we resched, then set
  TIF_NEED_RESCHED and no further action is required.
- If we are running on another CPU, and TIF_POLLING_NRFLAG is *not* set
  after TIF_NEED_RESCHED has been set, then we need to send an IPI.

Using these rules, we are able to remove the test and set operation in
resched_task, and make clear the previously vague semantics of
POLLING_NRFLAG.

* In idle routines:
- Enter cpu_idle with preempt disabled.  When the need_resched() condition
  becomes true, explicitly call schedule().  This makes things a bit clearer
  (IMO), but haven't updated all architectures yet.
- Many do a test and clear of TIF_NEED_RESCHED for some reason.  According
  to the resched_task rules, this isn't needed (and actually breaks the
  assumption that TIF_NEED_RESCHED is only cleared with the runqueue lock
  held).  So remove that.  Generally one less locked memory op when
  switching to the idle thread.
- Many idle routines clear TIF_POLLING_NRFLAG, and only set it in the
  innermost polling idle loops.  The above resched_task semantics allow it
  to be set until before the last time need_resched() is checked before
  going into a halt requiring interrupt wakeup.  Many idle routines simply
  never enter such a halt, and so POLLING_NRFLAG can be always left set,
  completely eliminating resched IPIs when rescheduling the idle task.
  POLLING_NRFLAG width can be increased, to reduce the chance of resched
  IPIs.

Signed-off-by: Nick Piggin
Cc: Ingo Molnar
Cc: Con Kolivas
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 0f2def822296..ac3f5cc3bb51 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -864,21 +864,28 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #ifdef CONFIG_SMP
 static void resched_task(task_t *p)
 {
-	int need_resched, nrpolling;
+	int cpu;
 
 	assert_spin_locked(&task_rq(p)->lock);
 
-	/* minimise the chance of sending an interrupt to poll_idle() */
-	nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
-	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+		return;
+
+	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id())
+		return;
 
-	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
-		smp_send_reschedule(task_cpu(p));
+	/* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+	smp_mb();
+	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+		smp_send_reschedule(cpu);
 }
 #else
 static inline void resched_task(task_t *p)
 {
+	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
 #endif
--
cgit v1.2.2


From a47ab9371e664952b1104a70ec8e9b74db3f7a5f Mon Sep 17 00:00:00 2001
From: "Chen, Kenneth W"
Date: Wed, 9 Nov 2005 15:45:29 -0800
Subject: [PATCH] optimize activate_task()

recalc_task_prio() is called from activate_task() to calculate dynamic
priority and interactive credit for the activating task.
For real-time scheduling processes, all that dynamic calculation is thrown
away at the end because rt priority is fixed.  Patch to optimize
recalc_task_prio() away for rt processes.

Signed-off-by: Ken Chen
Acked-by: Ingo Molnar
Cc: Nick Piggin
Cc: Con Kolivas
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index ac3f5cc3bb51..b6506671b2be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -815,7 +815,8 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
 	}
 #endif
 
-	p->prio = recalc_task_prio(p, now);
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
 
 	/*
 	 * This checks to make sure it's not an uninterruptible task
--
cgit v1.2.2
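
Taken together, the load-biasing side of this series ends up scaling a
runqueue's load by its average per-task priority bias: load = load *
prio_bias / nr_running.  A small stand-alone sketch of that arithmetic
(illustrative only; SCHED_LOAD_SCALE = 128 is assumed from kernels of this
era, and only the nr_running > 1 branch of the real condition is modelled):

	#include <stdio.h>

	#define SCHED_LOAD_SCALE	128UL

	/*
	 * Biased load in the style of the final __source_load()/__target_load():
	 * scale the SCHED_LOAD_SCALE-based load by the average per-task bias.
	 */
	static unsigned long biased_load(unsigned long nr_running,
					 unsigned long prio_bias)
	{
		unsigned long load = nr_running * SCHED_LOAD_SCALE;

		if (nr_running > 1)
			load = load * prio_bias / nr_running;
		return load;
	}

	int main(void)
	{
		/* two nice 0 tasks (bias 20 each) vs two nice 19 tasks (bias 1 each) */
		printf("2 x nice  0: %lu\n", biased_load(2, 40));	/* 5120 */
		printf("2 x nice 19: %lu\n", biased_load(2, 2));	/* 256 */
		return 0;
	}

Two nice 0 tasks come out roughly twenty times "heavier" than two nice 19
tasks, which is what biases the balancer toward pulling from queues full of
low-nice tasks; dividing by nr_running keeps the result proportional to the
queue's real load rather than to the raw sum of its priorities, which is the
error the "correct smp_nice_bias" patch above fixes.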