author		Con Kolivas <kernel@kolivas.org>	2005-09-10 03:26:08 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:22 -0400
commit		fc38ed7531eefa332c8c69ee288487860cd6b426 (patch)
tree		d358d26559cc33c5d4ef2ed2f9cef8cf2b8ed3ae /kernel
parent		a7482a2e7775d163aecd8c95af7bb1b8c83890cc (diff)
[PATCH] sched: run SCHED_NORMAL tasks with real time tasks on SMT siblings
The hyperthread aware nice handling currently puts to sleep any non real
time task when a real time task is running on its sibling cpu.  This can
lead to prolonged starvation by having the non real time task pegged to the
cpu with load balancing not pulling that task away.

Currently we force lower priority hyperthread tasks to run a percentage of
time difference based on timeslice differences which is meaningless when
comparing real time tasks to SCHED_NORMAL tasks.  We can allow non real
time tasks to run with real time tasks on the sibling up to per_cpu_gain%
if we use jiffies as a counter.

Cleanups and micro-optimisations to the relevant code section should make
it more understandable as well.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
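[Editor's note] The key change is the jiffies-based gate: while a real time
task owns the SMT sibling, a SCHED_NORMAL task is allowed to run only during
the first per_cpu_gain% of each DEF_TIMESLICE-long window of jiffies. The
standalone sketch below illustrates that arithmetic outside the kernel; the
values per_cpu_gain = 25 and DEF_TIMESLICE = 100 jiffies are assumptions for
illustration, not taken from this patch.

/*
 * Standalone sketch of the jiffies-based throttle described above.
 * Not kernel code: PER_CPU_GAIN = 25 and DEF_TIMESLICE = 100 are
 * assumed here purely for illustration.
 */
#include <stdio.h>

#define DEF_TIMESLICE	100	/* length of one throttle window, in jiffies */
#define PER_CPU_GAIN	25	/* % of the window a non-rt task may run */

/*
 * Mirror of the patch's test: the non-rt task must sleep whenever the
 * position inside the current window is past the per_cpu_gain% mark.
 */
static int throttled_by_rt_sibling(unsigned long jiffies)
{
	return (jiffies % DEF_TIMESLICE) >
		(PER_CPU_GAIN * DEF_TIMESLICE / 100);
}

int main(void)
{
	unsigned long j;
	int runnable = 0;

	for (j = 0; j < DEF_TIMESLICE; j++)
		if (!throttled_by_rt_sibling(j))
			runnable++;

	/* Roughly per_cpu_gain% of the window is runnable. */
	printf("runnable %d of %d jiffies\n", runnable, DEF_TIMESLICE);
	return 0;
}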
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	65
1 file changed, 47 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 15db82116e19..ef748e691608 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2580,6 +2580,13 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
+static inline void wakeup_busy_runqueue(runqueue_t *rq)
+{
+	/* If an SMT runqueue is sleeping due to priority reasons wake it up */
+	if (rq->curr == rq->idle && rq->nr_running)
+		resched_task(rq->idle);
+}
+
 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
@@ -2613,12 +2620,7 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq = cpu_rq(i);
 
-		/*
-		 * If an SMT sibling task is sleeping due to priority
-		 * reasons wake it up now.
-		 */
-		if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
-			resched_task(smt_rq->idle);
+		wakeup_busy_runqueue(smt_rq);
 	}
 
 	for_each_cpu_mask(i, sibling_map)
@@ -2672,6 +2674,10 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 		runqueue_t *smt_rq = cpu_rq(i);
 		task_t *smt_curr = smt_rq->curr;
 
+		/* Kernel threads do not participate in dependent sleeping */
+		if (!p->mm || !smt_curr->mm || rt_task(p))
+			goto check_smt_task;
+
 		/*
 		 * If a user task with lower static priority than the
 		 * running task on the SMT sibling is trying to schedule,
@@ -2680,21 +2686,44 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 		 * task from using an unfair proportion of the
 		 * physical cpu's resources. -ck
 		 */
-		if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
-			task_timeslice(p) || rt_task(smt_curr)) &&
-			p->mm && smt_curr->mm && !rt_task(p))
-				ret = 1;
+		if (rt_task(smt_curr)) {
+			/*
+			 * With real time tasks we run non-rt tasks only
+			 * per_cpu_gain% of the time.
+			 */
+			if ((jiffies % DEF_TIMESLICE) >
+				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+					ret = 1;
+		} else
+			if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) /
+				100) > task_timeslice(p)))
+					ret = 1;
+
+check_smt_task:
+		if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
+			rt_task(smt_curr))
+				continue;
+		if (!p->mm) {
+			wakeup_busy_runqueue(smt_rq);
+			continue;
+		}
 
 		/*
-		 * Reschedule a lower priority task on the SMT sibling,
-		 * or wake it up if it has been put to sleep for priority
-		 * reasons.
+		 * Reschedule a lower priority task on the SMT sibling for
+		 * it to be put to sleep, or wake it up if it has been put to
+		 * sleep for priority reasons to see if it should run now.
 		 */
-		if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-			task_timeslice(smt_curr) || rt_task(p)) &&
-			smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
-			(smt_curr == smt_rq->idle && smt_rq->nr_running))
-				resched_task(smt_curr);
+		if (rt_task(p)) {
+			if ((jiffies % DEF_TIMESLICE) >
+				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
+					resched_task(smt_curr);
+		} else {
+			if ((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
+				task_timeslice(smt_curr))
+					resched_task(smt_curr);
+			else
+				wakeup_busy_runqueue(smt_rq);
+		}
 	}
 out_unlock:
 	for_each_cpu_mask(i, sibling_map)
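
[Editor's note] For two SCHED_NORMAL tasks the patch keeps the old
proportional rule: the incoming task sleeps only while the sibling's
remaining timeslice, scaled down by per_cpu_gain, still exceeds the incoming
task's full timeslice. Below is a hedged user-space sketch of that
comparison; the function name, variable names, and sample values are
invented for illustration and only the arithmetic mirrors the patch.

/*
 * User-space sketch of the proportional test retained for the
 * non-rt vs non-rt case. Names and sample values are invented;
 * only the comparison mirrors the patch. PER_CPU_GAIN = 25 is an
 * assumed value, as in the earlier sketch.
 */
#include <stdio.h>

#define PER_CPU_GAIN	25

/*
 * Sleep the incoming task while the sibling's remaining slice, scaled
 * down by per_cpu_gain, still exceeds the incoming task's full slice.
 */
static int dependent_sleep(int sibling_slice_left, int my_full_slice)
{
	return (sibling_slice_left * (100 - PER_CPU_GAIN) / 100) >
		my_full_slice;
}

int main(void)
{
	/* Sample slice lengths in jiffies, chosen for illustration. */
	printf("%d\n", dependent_sleep(150, 100));	/* 112 > 100 -> sleep */
	printf("%d\n", dependent_sleep(100, 100));	/* 75 <= 100 -> run */
	return 0;
}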