path: root/kernel/sched_fair.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-24 11:38:48 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-04-02 14:12:05 -0400
commit     371fd7e7a56a5c136d31aa980011bd2f131c3ef5 (patch)
tree       cf52014018e8258acd8bcfd486d855f098a02c03 /kernel/sched_fair.c
parent     cc87f76a601d2d256118f7bab15e35254356ae21 (diff)
sched: Add enqueue/dequeue flags
In order to reduce the dependency on TASK_WAKING, rework the enqueue interface to support a proper flags field.

Replace the int wakeup, bool head arguments with an int flags argument and create the following flags:

  ENQUEUE_WAKEUP - the enqueue is a wakeup of a sleeping task,
  ENQUEUE_WAKING - the enqueue has relative vruntime due to having
                   sched_class::task_waking() called,
  ENQUEUE_HEAD   - the waking task should be placed on the head of the
                   priority queue (where appropriate).

For symmetry, also convert sched_class::dequeue() to a flags scheme.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
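For reference, here is a minimal sketch of the flag scheme the commit message describes. The flag names come from the message and the diff below; the numeric values and the suggestion that they live in kernel/sched.c are illustrative assumptions rather than text quoted from this patch:

	/* Sketch only: names from the commit message, values assumed. */
	#define ENQUEUE_WAKEUP	1	/* enqueue is the wakeup of a sleeping task */
	#define ENQUEUE_WAKING	2	/* vruntime is relative; task_waking() was called */
	#define ENQUEUE_HEAD	4	/* place the task at the head of the priority queue */

	#define DEQUEUE_SLEEP	1	/* the task being dequeued is going to sleep */

	/* A wakeup of a migrating task would then combine bits, e.g.: */
	/* p->sched_class->enqueue_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); */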
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  25
1 file changed, 8 insertions, 17 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8a5e7632d09b..88d3053ac7c2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -757,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP	1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -767,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update the normalized vruntime before updating min_vruntime
 	 * through callig update_curr().
 	 */
-	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
 		se->vruntime += cfs_rq->min_vruntime;
 
 	/*
@@ -803,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	/*
 	 * Update run-time statistics of the 'current'.
@@ -811,7 +808,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	update_curr(cfs_rq);
 
 	update_stats_dequeue(cfs_rq, se);
-	if (sleep) {
+	if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -836,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	 * update can refer to the ->curr item and we need to reflect this
 	 * movement in our normalized position.
 	 */
-	if (!sleep)
+	if (!(flags & DEQUEUE_SLEEP))
 		se->vruntime -= cfs_rq->min_vruntime;
 }
 
@@ -1045,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	int flags = 0;
-
-	if (wakeup)
-		flags |= ENQUEUE_WAKEUP;
-	if (p->state == TASK_WAKING)
-		flags |= ENQUEUE_MIGRATE;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -1072,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		dequeue_entity(cfs_rq, se, sleep);
+		dequeue_entity(cfs_rq, se, flags);
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight)
 			break;
-		sleep = 1;
+		flags |= DEQUEUE_SLEEP;
 	}
 
 	hrtick_update(rq);
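To illustrate the flag-propagation idiom in the final hunk, here is a small standalone userspace toy; none of these types or functions exist in the kernel, it only mirrors the shape of dequeue_task_fair(): each level of the hierarchy is dequeued with the caller's flags, the walk stops at the first level that still has load, and an emptied level ORs in DEQUEUE_SLEEP for its parents.

	#include <stdio.h>

	#define DEQUEUE_SLEEP	1	/* same bit the patch tests in dequeue_entity() */

	/* Hypothetical stand-in for a cfs_rq in a group hierarchy. */
	struct toy_rq {
		int weight;		/* load remaining after removing our entity */
		struct toy_rq *parent;
	};

	static void toy_dequeue_entity(struct toy_rq *q, int flags)
	{
		printf("dequeue: remaining weight=%d flags=%#x\n", q->weight, flags);
	}

	static void toy_dequeue_task(struct toy_rq *q, int flags)
	{
		for (; q; q = q->parent) {
			toy_dequeue_entity(q, flags);
			/* Don't dequeue a parent that has other entities besides us. */
			if (q->weight)
				break;
			/* An emptied level "sleeps" from its parent's point of view. */
			flags |= DEQUEUE_SLEEP;
		}
	}

	int main(void)
	{
		struct toy_rq root  = { .weight = 3, .parent = NULL };
		struct toy_rq group = { .weight = 0, .parent = &root };
		struct toy_rq leaf  = { .weight = 0, .parent = &group };

		toy_dequeue_task(&leaf, 0);	/* a non-sleep dequeue at the leaf */
		return 0;
	}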