author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-10-17 13:27:03 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-20 08:05:03 -0400
commit     a4c2f00f5cb848af7a8c816426b413c8e41834df
tree       f99d376ae43fe7e19db0f5f268c6925d8cda2107 /kernel/sched_fair.c
parent     ffda12a17a324103e9900fa1035309811eecbfe5
sched: fair scheduler should not resched rt tasks
Using ftrace, Steven noticed that some RT tasks got rescheduled due
to sched_fair interaction.

What happens is that we reprogram the hrtick from enqueue_task_fair()
and dequeue_task_fair(), because those can change nr_running and thus
the current task's ideal runtime.

However, it's possible the current task isn't a fair_sched_class task,
and thus doesn't have an hrtick set to change.

Fix this by wrapping those hrtick_start_fair() calls in an
hrtick_update() function, which checks for the right conditions.
Reported-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
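[Editor's note] To make the failure mode concrete, here is a small stand-alone mock (not kernel code: the types, the printf stand-ins, and main() are invented for illustration) showing why calling the fair-class helper on rq->curr misfires when an RT task is running, and how the class check in hrtick_update() avoids it:

#include <stdio.h>

/* Minimal mocks of the scheduling structures involved; the names
 * mirror the kernel but everything here is hypothetical. */
struct sched_class { const char *name; };

static const struct sched_class fair_sched_class = { "fair" };
static const struct sched_class rt_sched_class   = { "rt"   };

struct task_struct { const struct sched_class *sched_class; };
struct rq { struct task_struct *curr; unsigned int nr_running; };

/* Stand-in for hrtick_start_fair(): it assumes a fair-class task;
 * invoking it for an RT task is exactly the bug this patch fixes. */
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
        (void)rq;
        printf("reprogramming hrtick for a %s task\n", p->sched_class->name);
}

/* The patch's guard: bail out unless the running task is fair class.
 * (The real helper also requires nr_running < sched_nr_latency;
 * that check is omitted from this mock.) */
static void hrtick_update(struct rq *rq)
{
        if (rq->curr->sched_class != &fair_sched_class)
                return;
        hrtick_start_fair(rq, rq->curr);
}

int main(void)
{
        struct task_struct rt_task = { &rt_sched_class };
        struct rq rq = { .curr = &rt_task, .nr_running = 2 };

        /* Before the patch, enqueue_task_fair() did this unconditionally: */
        hrtick_start_fair(&rq, rq.curr);  /* wrongly touches the RT task */

        /* With the patch it goes through the guard instead: */
        hrtick_update(&rq);               /* no-op while an RT task runs */
        return 0;
}

Running it, the first call reports reprogramming the tick for an "rt" task (the behavior ftrace exposed), while the guarded call does nothing.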
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 67084936b602..0c4bcac54761 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+static const struct sched_class fair_sched_class;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -848,11 +850,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		hrtick_start(rq, delta);
 	}
 }
+
+/*
+ * called from enqueue/dequeue and updates the hrtick when the
+ * current task is from our class and nr_running is low enough
+ * to matter.
+ */
+static void hrtick_update(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+
+	if (curr->sched_class != &fair_sched_class)
+		return;
+
+	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+		hrtick_start_fair(rq, curr);
+}
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 }
+
+static inline void hrtick_update(struct rq *rq)
+{
+}
 #endif
 
 /*
@@ -873,7 +895,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 		wakeup = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -895,7 +917,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 		sleep = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -1001,8 +1023,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 
-static const struct sched_class fair_sched_class;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
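[Editor's note] For context on the first hunk: hrtick_start_fair(), whose tail (`hrtick_start(rq, delta);`) appears above, arms the high-resolution tick with the time remaining in the current task's slice. The sketch below is a hedged reconstruction of that arithmetic based on the CFS bookkeeping of this kernel era (sched_slice(), sum_exec_runtime, prev_sum_exec_runtime); the full function body is not part of this diff, and the numbers here are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Toy values standing in for CFS bookkeeping, in nanoseconds. */
        uint64_t slice = 4000000;                 /* ideal runtime, as sched_slice() would return */
        uint64_t sum_exec_runtime = 9500000;      /* total time this entity has run              */
        uint64_t prev_sum_exec_runtime = 7000000; /* snapshot taken when it was last picked      */

        uint64_t ran = sum_exec_runtime - prev_sum_exec_runtime;
        int64_t delta = (int64_t)(slice - ran);   /* time left in the slice */

        if (delta < 0)
                printf("slice overrun: reschedule now\n");  /* resched path          */
        else
                printf("arm hrtick to fire in %lld ns\n",
                       (long long)delta);                   /* hrtick_start() path   */
        return 0;
}

Because enqueue/dequeue change nr_running, they change sched_slice() and hence this delta, which is why the patch recomputes it there, but only after hrtick_update() has confirmed the running task is actually in the fair class.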