Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1926606ece80..56b7d4b83947 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1220,8 +1220,6 @@ static void task_numa_assign(struct task_numa_env *env,
 {
         if (env->best_task)
                 put_task_struct(env->best_task);
-        if (p)
-                get_task_struct(p);
 
         env->best_task = p;
         env->best_imp = imp;
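
The first hunk drops the get_task_struct(p) from task_numa_assign(): after this change the caller already holds a reference on any task it passes in, so the helper simply inherits that reference instead of taking a second one. Below is a minimal userspace sketch of that ownership-transfer convention; struct task, task_get(), task_put() and env_assign() are hypothetical stand-ins for task_struct, get_task_struct(), put_task_struct() and task_numa_assign(), not kernel API.

#include <stdatomic.h>
#include <stdlib.h>

struct task {
        atomic_int refcount;
        /* ... payload ... */
};

static void task_get(struct task *t)
{
        atomic_fetch_add(&t->refcount, 1);
}

static void task_put(struct task *t)
{
        /* atomic_fetch_sub() returns the old value: 1 means last ref. */
        if (atomic_fetch_sub(&t->refcount, 1) == 1)
                free(t);
}

struct env {
        struct task *best_task; /* holds one reference, or NULL */
        long best_imp;
};

/*
 * Takes ownership of the caller's reference on @p (which may be NULL).
 * Mirrors the patched task_numa_assign(): no get here, the caller has
 * already pinned @p.
 */
static void env_assign(struct env *env, struct task *p, long imp)
{
        if (env->best_task)
                task_put(env->best_task); /* drop the old best's reference */
        env->best_task = p;
        env->best_imp = imp;
}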
@@ -1289,20 +1287,30 @@ static void task_numa_compare(struct task_numa_env *env,
         long imp = env->p->numa_group ? groupimp : taskimp;
         long moveimp = imp;
         int dist = env->dist;
+        bool assigned = false;
 
         rcu_read_lock();
 
         raw_spin_lock_irq(&dst_rq->lock);
         cur = dst_rq->curr;
         /*
-         * No need to move the exiting task, and this ensures that ->curr
-         * wasn't reaped and thus get_task_struct() in task_numa_assign()
-         * is safe under RCU read lock.
-         * Note that rcu_read_lock() itself can't protect from the final
-         * put_task_struct() after the last schedule().
+         * No need to move the exiting task or idle task.
          */
         if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                 cur = NULL;
+        else {
+                /*
+                 * The task_struct must be protected here to protect the
+                 * p->numa_faults access in the task_weight since the
+                 * numa_faults could already be freed in the following path:
+                 * finish_task_switch()
+                 *     --> put_task_struct()
+                 *         --> __put_task_struct()
+                 *             --> task_numa_free()
+                 */
+                get_task_struct(cur);
+        }
+
         raw_spin_unlock_irq(&dst_rq->lock);
 
         /*
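
This hunk is the heart of the fix: RCU alone cannot prevent the final put_task_struct() after a task's last schedule(), so dst_rq->curr must be pinned with get_task_struct() while dst_rq->lock is still held; otherwise the later task_weight() call can read cur->numa_faults after task_numa_free() has run. A sketch of the lock-then-pin pattern follows, continuing the types above, with a pthread mutex as a stand-in for raw_spin_lock_irq(); the runqueue layout and pin_current() are invented for illustration.

#include <pthread.h>
#include <stdbool.h>

struct runqueue {
        pthread_mutex_t lock;
        struct task *curr;      /* only stable while lock is held */
        bool curr_exiting;
};

static struct task *pin_current(struct runqueue *rq)
{
        struct task *cur;

        pthread_mutex_lock(&rq->lock);
        cur = rq->curr;
        if (rq->curr_exiting)   /* no need to examine an exiting task */
                cur = NULL;
        else
                task_get(cur);  /* pin before the lock is dropped */
        pthread_mutex_unlock(&rq->lock);

        /*
         * A non-NULL cur can now be dereferenced safely even if it stops
         * running and drops its own last reference: we hold one of our own.
         */
        return cur;
}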
@@ -1386,6 +1394,7 @@ balance:
          */
         if (!load_too_imbalanced(src_load, dst_load, env)) {
                 imp = moveimp - 1;
+                put_task_struct(cur);
                 cur = NULL;
                 goto assign;
         }
@@ -1411,9 +1420,16 @@ balance:
                 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
+        assigned = true;
         task_numa_assign(env, cur, imp);
 unlock:
         rcu_read_unlock();
+        /*
+         * The dst_rq->curr isn't assigned. The protection for task_struct is
+         * finished.
+         */
+        if (cur && !assigned)
+                put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
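
The last two hunks balance the new reference on every exit path: the load-too-imbalanced path drops it explicitly before clearing cur, and the assigned flag records whether task_numa_assign() took ownership, so the reference is released after rcu_read_unlock() only when it was not handed over. Continuing the sketch above, compare() is a hypothetical condensation of task_numa_compare() showing just that bookkeeping.

/* Continues the sketch above. */
static void compare(struct env *env, struct runqueue *dst_rq, long imp)
{
        struct task *cur = pin_current(dst_rq);
        bool assigned = false;

        if (!cur)
                goto unlock;

        if (imp <= env->best_imp)
                goto unlock;            /* not better: reference still ours */

        /* env_assign() inherits our reference on cur. */
        assigned = true;
        env_assign(env, cur, imp);
unlock:
        /* Drop the pin unless ownership moved into env->best_task. */
        if (cur && !assigned)
                task_put(cur);
}

The invariant is the same one the patch enforces: each task pinned by compare() is either stored in env->best_task (to be put when it is displaced) or put on the spot, so no path leaks or double-drops a reference.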