Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 25 ++++++++-----------------
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8a5e7632d09b..88d3053ac7c2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -757,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -767,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * Update the normalized vruntime before updating min_vruntime
          * through callig update_curr().
          */
-        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                 se->vruntime += cfs_rq->min_vruntime;
 
         /*
@@ -803,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
         /*
          * Update run-time statistics of the 'current'.
@@ -811,7 +808,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
         update_curr(cfs_rq);
 
         update_stats_dequeue(cfs_rq, se);
-        if (sleep) {
+        if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
                 if (entity_is_task(se)) {
                         struct task_struct *tsk = task_of(se);
@@ -836,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
          * update can refer to the ->curr item and we need to reflect this
          * movement in our normalized position.
          */
-        if (!sleep)
+        if (!(flags & DEQUEUE_SLEEP))
                 se->vruntime -= cfs_rq->min_vruntime;
 }
 
@@ -1045,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
-        int flags = 0;
-
-        if (wakeup)
-                flags |= ENQUEUE_WAKEUP;
-        if (p->state == TASK_WAKING)
-                flags |= ENQUEUE_MIGRATE;
 
         for_each_sched_entity(se) {
                 if (se->on_rq)
@@ -1072,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
 
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
-                dequeue_entity(cfs_rq, se, sleep);
+                dequeue_entity(cfs_rq, se, flags);
                 /* Don't dequeue parent if it has other entities besides us */
                 if (cfs_rq->load.weight)
                         break;
-                sleep = 1;
+                flags |= DEQUEUE_SLEEP;
         }
 
         hrtick_update(rq);
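
For readers following the API change: the removed ENQUEUE_WAKEUP/ENQUEUE_MIGRATE defines and the separate wakeup/sleep arguments are replaced by a single flags bitmask threaded from the class hooks down into enqueue_entity()/dequeue_entity(). The sketch below is illustrative only and is not part of this diff; the values of ENQUEUE_WAKING and DEQUEUE_SLEEP are assumptions (only ENQUEUE_WAKEUP == 1 and the old ENQUEUE_MIGRATE == 2 are visible in the removed lines), and activate_example() is a hypothetical helper that merely mirrors the flag-building code removed from enqueue_task_fair().

/*
 * Sketch only -- not part of this diff. After the change the flag
 * definitions presumably live in shared scheduler code; the values of
 * ENQUEUE_WAKING and DEQUEUE_SLEEP below are assumed.
 */
#define ENQUEUE_WAKEUP  1       /* enqueue is part of a wakeup */
#define ENQUEUE_WAKING  2       /* assumed: replaces ENQUEUE_MIGRATE; vruntime not yet normalized */
#define DEQUEUE_SLEEP   1       /* assumed: task is blocking rather than being migrated */

/* Hypothetical caller mirroring the logic removed from enqueue_task_fair(): */
static void activate_example(struct rq *rq, struct task_struct *p, int wakeup)
{
        int flags = 0;

        if (wakeup)
                flags |= ENQUEUE_WAKEUP;
        if (p->state == TASK_WAKING)
                flags |= ENQUEUE_WAKING;

        enqueue_task_fair(rq, p, flags);
}

Collapsing the separate booleans into one flags word also lets dequeue_task_fair() force DEQUEUE_SLEEP for parent group entities (the "flags |= DEQUEUE_SLEEP" line above) without adding another parameter.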