Diffstat (limited to 'kernel/sched_fair.c')

 kernel/sched_fair.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 6 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5bedf6e3ebf3..42ac3c9f66f6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -510,6 +510,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+
 	curr->vruntime += delta_exec_weighted;
 	update_min_vruntime(cfs_rq);
 }
@@ -765,16 +766,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	se->vruntime = vruntime;
 }
 
+#define ENQUEUE_WAKEUP	1
+#define ENQUEUE_MIGRATE	2
+
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	/*
+	 * Update the normalized vruntime before updating min_vruntime
+	 * through calling update_curr().
+	 */
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+		se->vruntime += cfs_rq->min_vruntime;
+
+	/*
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
 	account_entity_enqueue(cfs_rq, se);
 
-	if (wakeup) {
+	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
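An aside on the condition guarding the new addition: an entity arrives with a normalized (queue-relative) vruntime either when it is enqueued without waking (it was taken off a runqueue with sleep == 0) or when it woke up and migrated (task_waking_fair() below subtracts the old queue's min_vruntime). A plain wakeup on the same runqueue keeps an absolute vruntime and is handled by place_entity() instead. A minimal user-space sketch of the predicate (needs_denormalize() is a hypothetical name, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define ENQUEUE_WAKEUP	1
#define ENQUEUE_MIGRATE	2

/* True when the incoming vruntime is queue-relative and must have the
 * destination queue's min_vruntime added back. */
static bool needs_denormalize(int flags)
{
	return !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE);
}

int main(void)
{
	printf("plain (re)enqueue: %d\n", needs_denormalize(0));              /* 1 */
	printf("local wakeup:      %d\n", needs_denormalize(ENQUEUE_WAKEUP)); /* 0 */
	printf("migrated wakeup:   %d\n",
	       needs_denormalize(ENQUEUE_WAKEUP | ENQUEUE_MIGRATE));          /* 1 */
	return 0;
}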
@@ -828,6 +839,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
+
+	/*
+	 * Normalize the entity after updating the min_vruntime because the
+	 * update can refer to the ->curr item and we need to reflect this
+	 * movement in our normalized position.
+	 */
+	if (!sleep)
+		se->vruntime -= cfs_rq->min_vruntime;
 }
 
 /*
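The ordering constraint in that comment can be made concrete: when the departing entity is cfs_rq->curr, update_min_vruntime() may advance min_vruntime on its account, and the normalization has to use the post-update value or the stored offset would be stale. A deliberately simplified model (toy types and numbers; the kernel's update_min_vruntime() also consults the leftmost queued entity):

#include <assert.h>

struct toy_rq { unsigned long long min_vruntime; };

/* Simplified update_min_vruntime(): the floor only moves forward. */
static void toy_update_min(struct toy_rq *rq, unsigned long long curr_v)
{
	if (curr_v > rq->min_vruntime)
		rq->min_vruntime = curr_v;
}

int main(void)
{
	struct toy_rq rq = { .min_vruntime = 1000 };
	unsigned long long v = 1300;	/* departing ->curr's vruntime */

	/* Wrong order: normalize against the stale floor. */
	unsigned long long stale = v - rq->min_vruntime;	/* 300 */

	/* The patch's order: update the floor first, then normalize. */
	toy_update_min(&rq, v);
	unsigned long long fresh = v - rq->min_vruntime;	/* 0 */

	assert(stale != fresh);	/* the two orderings really differ */
	return 0;
}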
@@ -1038,13 +1057,19 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int flags = 0;
+
+	if (wakeup)
+		flags |= ENQUEUE_WAKEUP;
+	if (p->state == TASK_WAKING)
+		flags |= ENQUEUE_MIGRATE;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			break;
 		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, wakeup);
-		wakeup = 1;
+		enqueue_entity(cfs_rq, se, flags);
+		flags = ENQUEUE_WAKEUP;
 	}
 
 	hrtick_update(rq);
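One subtlety in that loop: only the first, task-level entity can carry ENQUEUE_MIGRATE, since only the task's own vruntime was normalized against the old CPU's queue; the group entities enqueued further up already live in this CPU's clock domain, so flags degrades to plain ENQUEUE_WAKEUP after the first iteration. A toy walk of the hierarchy (hypothetical names, flattened to a parent pointer):

#include <stdio.h>

#define ENQUEUE_WAKEUP	1
#define ENQUEUE_MIGRATE	2

struct toy_se {
	const char *name;
	struct toy_se *parent;
};

static void toy_enqueue_entity(struct toy_se *se, int flags)
{
	printf("%s: flags=%d\n", se->name, flags);
}

static void toy_enqueue_task(struct toy_se *se, int flags)
{
	for (; se; se = se->parent) {
		toy_enqueue_entity(se, flags);
		flags = ENQUEUE_WAKEUP;	/* parents never re-add min_vruntime */
	}
}

int main(void)
{
	struct toy_se group = { "group se", NULL };
	struct toy_se task  = { "task se", &group };

	/* A task waking up after migrating to this CPU: */
	toy_enqueue_task(&task, ENQUEUE_WAKEUP | ENQUEUE_MIGRATE);
	return 0;
}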
@@ -1120,6 +1145,14 @@ static void yield_task_fair(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
+static void task_waking_fair(struct rq *rq, struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	se->vruntime -= cfs_rq->min_vruntime;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
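task_waking_fair() is the other half of the migration story: it runs on the wakeup path while the task is in TASK_WAKING and strips the old queue's min_vruntime, so a later set_task_cpu() no longer has to touch either cfs_rq. The round trip preserves the task's position relative to its queue's floor, as this self-contained sketch (toy types, arbitrary numbers) shows:

#include <assert.h>

struct toy_rq { unsigned long long min_vruntime; };
struct toy_se { unsigned long long vruntime; };

int main(void)
{
	struct toy_rq old_q = { .min_vruntime = 1000 };
	struct toy_rq new_q = { .min_vruntime = 5000 };
	struct toy_se se    = { .vruntime = 1200 };	/* 200 past old floor */

	se.vruntime -= old_q.min_vruntime;	/* task_waking_fair(): 200  */
	se.vruntime += new_q.min_vruntime;	/* enqueue_entity():   5200 */

	/* The 200-unit lag relative to the queue floor survives the move. */
	assert(se.vruntime - new_q.min_vruntime == 200);
	return 0;
}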
@@ -1429,6 +1462,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	}
 
 	for_each_domain(cpu, tmp) {
+		if (!(tmp->flags & SD_LOAD_BALANCE))
+			continue;
+
 		/*
 		 * If power savings logic is enabled for a domain, see if we
 		 * are not overloaded, if so, don't balance wider.
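The new check makes select_task_rq_fair() honor domains that have load balancing disabled (for instance through CPU isolation or domain tuning): a domain without SD_LOAD_BALANCE should not be used to widen the wakeup placement search. A toy version of the filter (illustrative types; the real SD_LOAD_BALANCE value lives in the kernel headers):

#include <stdio.h>

#define SD_LOAD_BALANCE	0x0001	/* illustrative stand-in */

struct toy_domain {
	const char *name;
	int flags;
	struct toy_domain *parent;
};

int main(void)
{
	struct toy_domain node = { "NODE", SD_LOAD_BALANCE, NULL };
	struct toy_domain cpu  = { "CPU",  0 /* balancing off */, &node };

	for (struct toy_domain *tmp = &cpu; tmp; tmp = tmp->parent) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;	/* skip non-balancing domains */
		printf("considering domain %s\n", tmp->name);
	}
	return 0;
}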
@@ -1975,6 +2011,8 @@ static void task_fork_fair(struct task_struct *p)
 		resched_task(rq->curr);
 	}
 
+	se->vruntime -= cfs_rq->min_vruntime;
+
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
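The fork path needs the same treatment: once task_fork_fair() drops the rq lock the child is not on any runqueue yet and may be placed on a different CPU before its first activation, so its vruntime is left in the normalized form that the non-wakeup branch of enqueue_entity() expects. A worked example with toy numbers (the subtraction here plays the role task_waking_fair() plays for a waking task):

#include <assert.h>

struct toy_rq { unsigned long long min_vruntime; };

int main(void)
{
	struct toy_rq fork_q = { .min_vruntime = 4000 };	/* parent's CPU */
	struct toy_rq run_q  = { .min_vruntime = 9000 };	/* CPU it first runs on */

	/* place_entity(..., initial = 1) gave the child a vruntime at or
	 * after the fork-time floor; say 150 past it: */
	unsigned long long child_v = fork_q.min_vruntime + 150;

	child_v -= fork_q.min_vruntime;	/* task_fork_fair(): store 150 */
	child_v += run_q.min_vruntime;	/* first enqueue_entity(): 9150 */

	assert(child_v - run_q.min_vruntime == 150);	/* placement kept */
	return 0;
}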
@@ -2028,12 +2066,13 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p)
+static void moved_group_fair(struct task_struct *p, int on_rq)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 
 	update_curr(cfs_rq);
-	place_entity(cfs_rq, &p->se, 1);
+	if (!on_rq)
+		place_entity(cfs_rq, &p->se, 1);
 }
 #endif
 
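The new on_rq argument distinguishes two cases when a task changes task groups: a queued or running task is dequeued just before the move, so its vruntime is already normalized and will be re-based by the following enqueue, and calling place_entity() on it would clobber that value; only a blocked task, which skips the dequeue/enqueue pair, gets a fresh placement on the new group's queue. A sketch of that distinction (toy types and a crude stand-in for place_entity()):

struct toy_rq { unsigned long long min_vruntime; };
struct toy_se { unsigned long long vruntime; };

static void toy_place_entity(struct toy_rq *rq, struct toy_se *se)
{
	/* crude stand-in for place_entity(..., initial = 1) */
	se->vruntime = rq->min_vruntime;
}

static void toy_moved_group(struct toy_rq *rq, struct toy_se *se, int on_rq)
{
	if (!on_rq)
		toy_place_entity(rq, se);	/* blocked: restart at the new floor */
	/* on_rq: the dequeue/enqueue around the move renormalizes it */
}

int main(void)
{
	struct toy_rq rq = { .min_vruntime = 7000 };
	struct toy_se blocked = { .vruntime = 42 };

	toy_moved_group(&rq, &blocked, 0);	/* blocked task: re-placed */
	return blocked.vruntime == 7000 ? 0 : 1;
}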
@@ -2073,6 +2112,8 @@ static const struct sched_class fair_sched_class = {
 	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
+
+	.task_waking		= task_waking_fair,
 #endif
 
 	.set_curr_task		= set_curr_task_fair,