Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b069bf3e708..34baa60f8a7b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -828,11 +828,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+	unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
 	unsigned int scan, floor;
 	unsigned int windows = 1;
 
-	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+	if (scan_size < MAX_SCAN_WINDOW)
+		windows = MAX_SCAN_WINDOW / scan_size;
 	floor = 1000 / windows;
 
 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
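The hunk above snapshots sysctl_numa_balancing_scan_size into a local with
ACCESS_ONCE() so the range check and the division that follows are guaranteed
to operate on the same value; without it, the compiler may reload the sysctl
between check and use, and a concurrent write to the tunable can slip in
between the two reads. Below is a minimal userspace sketch of the same
single-read pattern. sysctl_scan_size and scan_windows() are invented
stand-ins; ACCESS_ONCE() is written as the kernel defined it, and
MAX_SCAN_WINDOW's value is taken from kernel/sched/fair.c.

#include <stdio.h>

/* Force exactly one load through a volatile-qualified access so the
 * compiler cannot silently re-read the variable later.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

#define MAX_SCAN_WINDOW	2560	/* MB, as in kernel/sched/fair.c */

unsigned int sysctl_scan_size = 256;	/* updated concurrently elsewhere */

static unsigned int scan_windows(void)
{
	/* One load; the bounds check and the division both use this
	 * snapshot, never two different values of the tunable.
	 */
	unsigned int scan_size = ACCESS_ONCE(sysctl_scan_size);
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	return windows;
}

int main(void)
{
	printf("windows = %u\n", scan_windows());	/* 2560/256 = 10 */
	return 0;
}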
@@ -1164,9 +1165,19 @@ static void task_numa_compare(struct task_numa_env *env,
 	long moveimp = imp;
 
 	rcu_read_lock();
-	cur = ACCESS_ONCE(dst_rq->curr);
-	if (cur->pid == 0) /* idle */
+
+	raw_spin_lock_irq(&dst_rq->lock);
+	cur = dst_rq->curr;
+	/*
+	 * No need to move the exiting task, and this ensures that ->curr
+	 * wasn't reaped and thus get_task_struct() in task_numa_assign()
+	 * is safe under RCU read lock.
+	 * Note that rcu_read_lock() itself can't protect from the final
+	 * put_task_struct() after the last schedule().
+	 */
+	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
 		cur = NULL;
+	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
 	 * "imp" is the fault differential for the source task between the
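This hunk replaces a racy ACCESS_ONCE() load of dst_rq->curr with a read under
dst_rq->lock. As the added comment explains, rcu_read_lock() alone cannot stop
the final put_task_struct() after a task's last schedule(), so the later
get_task_struct() in task_numa_assign() could otherwise run on a task whose
last reference was already dropped. Holding the runqueue lock guarantees
->curr has not been reaped, and exiting or idle tasks are skipped outright.
The following userspace analogy (a sketch with invented names, not kernel
code) shows the general "take the lock that publishes the pointer, then take
your own reference" pattern:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* protected by reg_lock here */
};

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *current_obj;		/* stand-in for dst_rq->curr */

/* Safe: the pointer load and the refcount bump happen under the same
 * lock the release path takes, so the object cannot be freed between
 * the two (cf. reading ->curr and get_task_struct() under dst_rq->lock).
 */
static struct obj *get_current_obj(void)
{
	struct obj *o;

	pthread_mutex_lock(&reg_lock);
	o = current_obj;
	if (o)
		o->refcount++;
	pthread_mutex_unlock(&reg_lock);
	return o;
}

static void put_obj(struct obj *o)
{
	pthread_mutex_lock(&reg_lock);
	if (--o->refcount == 0)
		free(o);		/* cf. the final put_task_struct() */
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	struct obj *o;

	current_obj = malloc(sizeof(*current_obj));
	current_obj->refcount = 1;	/* the registry's own reference */

	o = get_current_obj();		/* pinned: refcount is now 2 */
	put_obj(o);

	pthread_mutex_lock(&reg_lock);
	o = current_obj;
	current_obj = NULL;		/* unpublish */
	pthread_mutex_unlock(&reg_lock);
	put_obj(o);			/* last reference, frees the object */
	return 0;
}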
@@ -1520,7 +1531,7 @@ static void update_task_scan_period(struct task_struct *p,
 		 * scanning faster if shared accesses dominate as it may
 		 * simply bounce migrations uselessly
 		 */
-		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
 		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
 	}
 
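The last hunk hardens the ratio computation: when a task has recorded no NUMA
faults yet, private and shared are both zero, the old divisor (private +
shared) was zero, and the division faulted. Adding 1 keeps the division legal
and only perturbs the ratio when the fault counts are tiny anyway. A small
self-contained sketch, with DIV_ROUND_UP re-defined as the kernel defines it
and invented example counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define NUMA_PERIOD_SLOTS	10	/* as in kernel/sched/fair.c */

static long ratio(long private, long shared)
{
	/* With private == shared == 0 the old divisor was 0 and this
	 * division faulted; "+ 1" makes the zero-fault case return 0.
	 */
	return DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS,
			    private + shared + 1);
}

int main(void)
{
	printf("%ld\n", ratio(0, 0));	/* 0, instead of a divide error */
	printf("%ld\n", ratio(90, 9));	/* 9: private accesses dominate */
	return 0;
}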