Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b069bf3e708..ef2b104b254c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
         account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+        update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
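The wrapper added here lets the core scheduler trigger runtime accounting for the running task through the class interface: it maps the class-agnostic struct rq to the CFS runqueue of rq->curr's sched_entity and reuses the existing update_curr(). A minimal sketch of the method slot this fills in, assuming it is declared alongside the other methods of struct sched_class in kernel/sched/sched.h:

        /* Hedged sketch: the hook this wrapper implements.  Classes that
         * do not provide it would leave the pointer NULL, so a caller
         * must check before dispatching through it. */
        void (*update_curr)(struct rq *rq);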
@@ -828,11 +833,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+        unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
         unsigned int scan, floor;
         unsigned int windows = 1;
 
-        if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-                windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+        if (scan_size < MAX_SCAN_WINDOW)
+                windows = MAX_SCAN_WINDOW / scan_size;
         floor = 1000 / windows;
 
         scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
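The snapshot matters because sysctl_numa_balancing_scan_size can be rewritten from userspace at any moment, and the old code read the global twice: once in the bounds check and again in the division. If the value grew past MAX_SCAN_WINDOW between the two reads, windows could end up 0 and the following floor = 1000 / windows would divide by zero. ACCESS_ONCE() forces a single load, so every later use operates on the same value the check saw. For reference, the macro of this era (include/linux/compiler.h) is just a volatile cast:

        /* Forces exactly one access; the compiler may not reload x. */
        #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))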
@@ -1164,9 +1170,26 @@ static void task_numa_compare(struct task_numa_env *env,
         long moveimp = imp;
 
         rcu_read_lock();
-        cur = ACCESS_ONCE(dst_rq->curr);
-        if (cur->pid == 0) /* idle */
+
+        raw_spin_lock_irq(&dst_rq->lock);
+        cur = dst_rq->curr;
+        /*
+         * No need to move the exiting task, and this ensures that ->curr
+         * wasn't reaped and thus get_task_struct() in task_numa_assign()
+         * is safe under RCU read lock.
+         * Note that rcu_read_lock() itself can't protect from the final
+         * put_task_struct() after the last schedule().
+         */
+        if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                 cur = NULL;
+        raw_spin_unlock_irq(&dst_rq->lock);
+
+        /*
+         * Because we have preemption enabled we can get migrated around and
+         * end try selecting ourselves (current == env->p) as a swap candidate.
+         */
+        if (cur == env->p)
+                goto unlock;
 
         /*
          * "imp" is the fault differential for the source task between the
@@ -1520,7 +1543,7 @@ static void update_task_scan_period(struct task_struct *p,
          * scanning faster if shared accesses dominate as it may
          * simply bounce migrations uselessly
          */
-        ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+        ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
         diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
         }
 
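Both fault counters can legitimately be zero when nothing was recorded in the window, and the old denominator (private + shared) then fed 0 into the division inside DIV_ROUND_UP(), a divide-by-zero. The extra "+ 1" keeps the denominator positive while skewing the ratio only marginally. The macro, from include/linux/kernel.h, plus a worked case (assuming NUMA_PERIOD_SLOTS == 10, its value in this file):

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        /* private = 7, shared = 3:
         *   old: DIV_ROUND_UP(7 * 10, 10) == 79 / 10 == 7
         *   new: DIV_ROUND_UP(7 * 10, 11) == 80 / 11 == 7
         * private = 0, shared = 0:
         *   old: denominator 0 -> divide by zero
         *   new: DIV_ROUND_UP(0, 1) == 0
         */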
@@ -7938,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
         .get_rr_interval        = get_rr_interval_fair,
 
+        .update_curr            = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
         .task_move_group        = task_move_group_fair,
 #endif
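With the method wired into fair_sched_class, core code can refresh the running task's sum_exec_runtime without knowing anything about CFS internals. A hedged caller sketch, modeled on how a task-runtime read (task_sched_runtime()-style) would use the hook; treat the exact call site as an assumption:

        /* Only a ->curr that is still queued has unaccounted cycles;
         * refresh the rq clock, then let the task's class fold the
         * delta in before sum_exec_runtime is read. */
        if (task_current(rq, p) && task_on_rq_queued(p)) {
                update_rq_clock(rq);
                p->sched_class->update_curr(rq);
        }
        ns = p->se.sum_exec_runtime;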