author     Linus Torvalds <torvalds@linux-foundation.org>    2017-02-28 14:44:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-02-28 14:44:01 -0500
commit     65314ed08e9c4a94ba85f7d52a7ad324050b152e (patch)
tree       b0e7f344889c0306e8c201856a9d834de737329e /kernel
parent     3f26b0c876bbfeed74325ada0329de53efbdf7a6 (diff)
parent     96b777452d8881480fd5be50112f791c17db4b6b (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Two rq-clock warnings related fixes, plus a cgroups related crash fix"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/cgroup: Move sched_online_group() back into css_online() to fix crash
sched/fair: Update rq clock before changing a task's CPU affinity
sched/core: Fix update_rq_clock() splat on hotplug (and suspend/resume)
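The cgroups fix in this batch restores a two-phase bring-up for the cpu controller: cpu_cgroup_css_alloc() only allocates and initializes the task group, and the group is made visible to the rest of the scheduler from css_online(), after the cgroup core has finished setting up the css. The sketch below is a minimal userspace analogue of that "initialize first, publish second" ordering; the names (group_alloc(), group_online(), visible_groups) are hypothetical stand-ins, not the kernel API.

/*
 * Hypothetical userspace analogue of the css_alloc()/css_online() split:
 * allocation builds the object in private, and only online() links it
 * into the globally visible list, so concurrent walkers never observe a
 * half-initialized group. Names are illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct group {
	int weight;             /* scheduler-style per-group setting */
	struct group *next;     /* link in the globally visible list */
};

static struct group *visible_groups;    /* what "everyone else" walks */

/* Phase 1: allocate and initialize privately (css_alloc analogue). */
static struct group *group_alloc(int weight)
{
	struct group *g = calloc(1, sizeof(*g));

	if (!g)
		return NULL;
	g->weight = weight;     /* finish all setup before publication */
	return g;
}

/* Phase 2: publish only once fully initialized (css_online analogue). */
static void group_online(struct group *g)
{
	g->next = visible_groups;
	visible_groups = g;     /* single store makes the group visible */
}

int main(void)
{
	struct group *g = group_alloc(1024);

	if (!g)
		return 1;
	/* ... any remaining core initialization happens here ... */
	group_online(g);        /* walkers now see a fully set-up group */

	for (struct group *it = visible_groups; it; it = it->next)
		printf("group weight %d\n", it->weight);
	return 0;
}

Publishing before initialization is complete is what the crash fix undoes: once a group is on the visible list, other CPUs may walk it immediately, so the online step has to come last.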
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6ea1925ac5c0..bbfb917a9b49 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1090,6 +1090,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	int ret = 0;
 
 	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -5560,7 +5561,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
-	struct rq_flags rf, old_rf;
+	struct rq_flags rf;
 	int dest_cpu;
 
 	/*
@@ -5579,7 +5580,9 @@ static void migrate_tasks(struct rq *dead_rq)
 	 * class method both need to have an up-to-date
 	 * value of rq->clock[_task]
 	 */
+	rq_pin_lock(rq, &rf);
 	update_rq_clock(rq);
+	rq_unpin_lock(rq, &rf);
 
 	for (;;) {
 		/*
@@ -5592,7 +5595,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task() assumes pinned rq->lock:
 		 */
-		rq_pin_lock(rq, &rf);
+		rq_repin_lock(rq, &rf);
 		next = pick_next_task(rq, &fake_task, &rf);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
@@ -5621,13 +5624,6 @@ static void migrate_tasks(struct rq *dead_rq)
 			continue;
 		}
 
-		/*
-		 * __migrate_task() may return with a different
-		 * rq->lock held and a new cookie in 'rf', but we need
-		 * to preserve rf::clock_update_flags for 'dead_rq'.
-		 */
-		old_rf = rf;
-
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
@@ -5636,7 +5632,6 @@ static void migrate_tasks(struct rq *dead_rq)
 			raw_spin_unlock(&rq->lock);
 			rq = dead_rq;
 			raw_spin_lock(&rq->lock);
-			rf = old_rf;
 		}
 		raw_spin_unlock(&next->pi_lock);
 	}
@@ -6819,11 +6814,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
 
-	sched_online_group(tg, parent);
-
 	return &tg->css;
 }
 
+/* Expose task group only after completing cgroup initialization */
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css->parent);
+
+	if (parent)
+		sched_online_group(tg, parent);
+	return 0;
+}
+
 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
 {
 	struct task_group *tg = css_tg(css);
@@ -7229,6 +7233,7 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_alloc	= cpu_cgroup_css_alloc,
+	.css_online	= cpu_cgroup_css_online,
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.fork		= cpu_cgroup_fork,
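The two rq-clock changes follow one idea: call update_rq_clock() as soon as the rq lock is taken (and, in migrate_tasks(), inside a pin/unpin pair so the update is recorded in the rq_flags cookie that rq_repin_lock() later restores), so that the dequeue/enqueue paths which assert on a fresh clock never see a stale one. Below is a hypothetical, userspace-only analogue of that "refresh the clock under the lock, assert before consuming it" discipline; rq_stub, rf_cookie and the function names are illustrative stand-ins, not the scheduler's real types.

/*
 * Hypothetical analogue of the rq-clock discipline: a cookie records
 * whether the clock was refreshed during this critical section, and the
 * consumer asserts on it instead of silently using a stale value.
 */
#include <assert.h>
#include <stdio.h>
#include <time.h>

struct rq_stub {
	long clock;                 /* stand-in for rq->clock */
};

struct rf_cookie {
	int clock_updated;          /* stand-in for clock_update_flags */
};

static void lock_rq(struct rq_stub *rq, struct rf_cookie *rf)
{
	(void)rq;
	rf->clock_updated = 0;      /* new critical section: clock is stale */
}

static void update_clock(struct rq_stub *rq, struct rf_cookie *rf)
{
	rq->clock = (long)time(NULL);
	rf->clock_updated = 1;      /* remember the refresh in the cookie */
}

/* Consumer that needs an up-to-date clock (dequeue/enqueue analogue). */
static void account_runtime(struct rq_stub *rq, struct rf_cookie *rf)
{
	assert(rf->clock_updated);  /* plays the role of the kernel's warning */
	printf("accounting against clock %ld\n", rq->clock);
}

int main(void)
{
	struct rq_stub rq = { 0 };
	struct rf_cookie rf;

	lock_rq(&rq, &rf);
	update_clock(&rq, &rf);     /* the fixes add exactly this step */
	account_runtime(&rq, &rf);
	return 0;
}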