author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-31 18:44:04 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-31 18:44:04 -0500
commit    7ab85d4a85160ea2ffc96b1255443cbc83be180f
tree      65ccb76940c7574d96d235cd2860ea7473610e8b
parent    29d14f083522e5bc762256f68227d267118946c8
parent    840d6fe7425ffb6a62d53b2759e01ae6daf90e4e
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "Three small fixes in the scheduler/core:

   - a use-after-free in the NUMA code
   - a crash in the NUMA init code
   - a simple spelling fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  pid: Fix spelling in comments
  sched/numa: Fix use-after-free bug in the task_numa_compare
  sched: Fix crash in sched_init_numa()
Diffstat (limited to 'kernel')

 kernel/pid.c        |  2 +-
 kernel/sched/core.c |  2 +-
 kernel/sched/fair.c | 30 +++++++++++++++++++++++-------
 3 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index f4ad91b746f1..4d73a834c7e6 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -588,7 +588,7 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-	/* Veryify no one has done anything silly */
+	/* Verify no one has done anything silly: */
 	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
 
 	/* bump default and minimum pid_max based on number of cpus */
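
The comment fix above sits next to a BUILD_BUG_ON(), the kernel's compile-time assertion: the build fails outright if the condition is true. Below is a minimal standalone sketch of the same idea using C11's _Static_assert (note the inverted sense: it fails when its condition is false). The *_DEMO constants are invented stand-ins, not the kernel's actual values, and the stated rationale (keeping the pid limit below the PIDNS_HASH_ADDING flag bit) is a hedged reading of the check, not a claim about the kernel's internals.

/*
 * Userspace sketch of a BUILD_BUG_ON-style compile-time check.
 * _Static_assert fires at compile time, so an invalid configuration
 * can never reach a running system.
 */
#include <stdio.h>

#define PID_MAX_LIMIT_DEMO     (4 * 1024 * 1024)	/* made-up value */
#define PIDNS_HASH_ADDING_DEMO (1U << 31)		/* made-up flag bit */

/* Analogue of BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING): */
_Static_assert(PID_MAX_LIMIT_DEMO < PIDNS_HASH_ADDING_DEMO,
	       "pid limit must stay below the flag bit");

int main(void)
{
	/* If the assert above held, the limit cannot collide with the flag. */
	printf("max pid limit: %d\n", PID_MAX_LIMIT_DEMO);
	return 0;
}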
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 63d3a24e081a..9503d590e5ef 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6840,7 +6840,7 @@ static void sched_init_numa(void)
 
 			sched_domains_numa_masks[i][j] = mask;
 
-			for (k = 0; k < nr_node_ids; k++) {
+			for_each_node(k) {
 				if (node_distance(j, k) > sched_domains_numa_distance[i])
 					continue;
 
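
The crash fix replaces a plain counting loop with for_each_node(). nr_node_ids is only the highest possible node ID plus one, so on systems with sparse node numbering the counting loop also visits IDs that no node actually has, and calling node_distance() on such an ID can fault. for_each_node() walks only the IDs present in the possible-node mask. A standalone sketch of the difference follows; for_each_node_demo() is an invented iterator and the hard-coded mask stands in for the kernel's node_possible_map.

#include <stdio.h>

#define MAX_NODES 8

static unsigned long node_possible_mask = 0x0bUL;	/* nodes 0, 1, 3 */
static int nr_node_ids = 4;				/* highest ID + 1 */

static int node_is_possible(int n)
{
	return (node_possible_mask >> n) & 1UL;
}

/* Userspace stand-in for the kernel's for_each_node() iterator. */
#define for_each_node_demo(n)				\
	for ((n) = 0; (n) < MAX_NODES; (n)++)		\
		if (node_is_possible(n))

int main(void)
{
	int k;

	/* Buggy pattern: also visits ID 2, which no node actually has. */
	for (k = 0; k < nr_node_ids; k++)
		printf("counting loop visits %d%s\n", k,
		       node_is_possible(k) ? "" : "  <-- not a real node");

	/* Fixed pattern: only IDs set in the possible-node mask. */
	for_each_node_demo(k)
		printf("for_each_node visits %d\n", k);

	return 0;
}

The fixed iterator simply skips the holes, so the loop body can assume every k it sees refers to a node that exists.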
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1926606ece80..56b7d4b83947 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1220,8 +1220,6 @@ static void task_numa_assign(struct task_numa_env *env,
 {
 	if (env->best_task)
 		put_task_struct(env->best_task);
-	if (p)
-		get_task_struct(p);
 
 	env->best_task = p;
 	env->best_imp = imp;
@@ -1289,20 +1287,30 @@ static void task_numa_compare(struct task_numa_env *env,
 	long imp = env->p->numa_group ? groupimp : taskimp;
 	long moveimp = imp;
 	int dist = env->dist;
+	bool assigned = false;
 
 	rcu_read_lock();
 
 	raw_spin_lock_irq(&dst_rq->lock);
 	cur = dst_rq->curr;
 	/*
-	 * No need to move the exiting task, and this ensures that ->curr
-	 * wasn't reaped and thus get_task_struct() in task_numa_assign()
-	 * is safe under RCU read lock.
-	 * Note that rcu_read_lock() itself can't protect from the final
-	 * put_task_struct() after the last schedule().
+	 * No need to move the exiting task or idle task.
 	 */
 	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
 		cur = NULL;
+	else {
+		/*
+		 * The task_struct must be protected here to protect the
+		 * p->numa_faults access in the task_weight since the
+		 * numa_faults could already be freed in the following path:
+		 * finish_task_switch()
+		 *     --> put_task_struct()
+		 *         --> __put_task_struct()
+		 *             --> task_numa_free()
+		 */
+		get_task_struct(cur);
+	}
+
 	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
@@ -1386,6 +1394,7 @@ balance:
 	 */
 	if (!load_too_imbalanced(src_load, dst_load, env)) {
 		imp = moveimp - 1;
+		put_task_struct(cur);
 		cur = NULL;
 		goto assign;
 	}
@@ -1411,9 +1420,16 @@ balance:
 		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
+	assigned = true;
 	task_numa_assign(env, cur, imp);
 unlock:
 	rcu_read_unlock();
+	/*
+	 * The dst_rq->curr isn't assigned. The protection for task_struct is
+	 * finished.
+	 */
+	if (cur && !assigned)
+		put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
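
Taken together, the fair.c hunks move the reference acquisition from task_numa_assign() to task_numa_compare(): the pin on cur is now taken under the runqueue lock, while cur is provably still ->curr and cannot yet have been freed, and every path that does not hand cur to task_numa_assign() drops that pin (RCU alone cannot prevent the final put_task_struct() after a task's last schedule). Below is a minimal userspace sketch of that discipline; all names (task_demo, rq_curr, compare_against_curr, ...) are invented for illustration, and this is not the kernel's implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct task_demo {
	atomic_int usage;	/* lifetime reference count */
	int numa_faults;	/* data that is freed with the task */
};

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task_demo *rq_curr;	/* "->curr", valid under rq_lock */

static void get_task(struct task_demo *t)
{
	atomic_fetch_add(&t->usage, 1);
}

static void put_task(struct task_demo *t)
{
	if (atomic_fetch_sub(&t->usage, 1) == 1)
		free(t);	/* analogue of __put_task_struct() */
}

/* Mirrors the fixed task_numa_compare(): pin cur under the runqueue lock. */
static void compare_against_curr(void)
{
	struct task_demo *cur;
	bool assigned = false;	/* would be set if ownership were handed off */

	pthread_mutex_lock(&rq_lock);
	cur = rq_curr;
	if (cur)
		get_task(cur);	/* safe: cur cannot be freed while locked */
	pthread_mutex_unlock(&rq_lock);

	if (cur) {
		/* Without the pin above, this could read freed memory. */
		printf("cur->numa_faults = %d\n", cur->numa_faults);
	}

	/* Every path that did not hand cur off must drop the reference. */
	if (cur && !assigned)
		put_task(cur);
}

int main(void)
{
	rq_curr = calloc(1, sizeof(*rq_curr));
	atomic_init(&rq_curr->usage, 1);
	rq_curr->numa_faults = 42;

	compare_against_curr();
	put_task(rq_curr);	/* drop the initial reference */
	return 0;
}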