 kernel/sched/core.c     | 23
 kernel/sched/deadline.c | 39
 kernel/sched/rt.c       | 45
 3 files changed, 26 insertions(+), 81 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 740f90bdc67b..9917c962be99 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1163,8 +1163,31 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+	struct rq *rq = task_rq(p);
+	bool queued, running;
+
 	lockdep_assert_held(&p->pi_lock);
+
+	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
+
+	if (queued) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(&rq->lock);
+		dequeue_task(rq, p, 0);
+	}
+	if (running)
+		put_prev_task(rq, p);
+
 	p->sched_class->set_cpus_allowed(p, new_mask);
+
+	if (running)
+		p->sched_class->set_curr_task(rq);
+	if (queued)
+		enqueue_task(rq, p, 0);
 }
 
 /*
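Note: set_cpus_allowed_common() itself is not part of this diff; only its hunk-header context appears above. As a minimal sketch, assuming the common fallback does exactly what the removed per-class code ends with (copy the mask and cache its weight), it would look roughly like:

void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	/*
	 * Sketch only: the body is not shown in this diff; it is inferred
	 * from the cpumask_copy() + nr_cpus_allowed tails removed from the
	 * deadline and rt class methods below.
	 */
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}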
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index dc357fa572b0..b4730565a45d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1668,9 +1668,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 static void set_cpus_allowed_dl(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	struct rq *rq;
 	struct root_domain *src_rd;
-	int weight;
+	struct rq *rq;
 
 	BUG_ON(!dl_task(p));
 
@@ -1696,41 +1695,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		goto done;
-
-	/*
-	 * Update only if the task is actually running (i.e.,
-	 * it is on the rq AND it is not throttled).
-	 */
-	if (!on_dl_rq(&p->dl))
-		goto done;
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_dl_task(rq, p);
-		BUG_ON(!rq->dl.dl_nr_migratory);
-		rq->dl.dl_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_dl_task(rq, p);
-		rq->dl.dl_nr_migratory++;
-	}
-
-	update_dl_migration(&rq->dl);
-
-done:
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = weight;
+	set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
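The open-coded dl_nr_migratory accounting removed above is presumably covered by the dequeue/enqueue pair that do_set_cpus_allowed() now performs around the mask change: the task is re-enqueued after nr_cpus_allowed has been updated, so the regular enqueue-side accounting sees the new weight. A rough sketch of that enqueue-side helper, which is not part of this diff and whose exact shape in kernel/sched/deadline.c is assumed here:

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	/* Sketch: count the entity as migratory iff it may run on >1 CPU. */
	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}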
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 63692efeca82..d2ea59364a1c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2076,49 +2076,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	push_rt_tasks(rq);
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p,
-				const struct cpumask *new_mask)
-{
-	struct rq *rq;
-	int weight;
-
-	BUG_ON(!rt_task(p));
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		goto done;
-
-	if (!task_on_rq_queued(p))
-		goto done;
-
-	rq = task_rq(p);
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_task(rq, p);
-		BUG_ON(!rq->rt.rt_nr_migratory);
-		rq->rt.rt_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
-		rq->rt.rt_nr_migratory++;
-	}
-
-	update_rt_migration(&rq->rt);
-
-done:
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = weight;
-}
-
 /* Assumes rq->lock is held */
 static void rq_online_rt(struct rq *rq)
 {
@@ -2327,7 +2284,7 @@ const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.set_cpus_allowed	= set_cpus_allowed_rt,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.task_woken		= task_woken_rt,
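With set_cpus_allowed_rt() gone, the RT class also falls back to set_cpus_allowed_common(), and every affinity change now flows through do_set_cpus_allowed() with both p->pi_lock and rq->lock held, as enforced by the lockdep assertions added in core.c. A simplified sketch of such a call site, modeled loosely on set_cpus_allowed_ptr() but with validation and cross-CPU migration omitted (the function name below is hypothetical, and the task_rq_lock() calling convention of this kernel era is assumed):

static int __set_cpus_allowed_sketch(struct task_struct *p,
				     const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);	/* takes p->pi_lock and rq->lock */
	do_set_cpus_allowed(p, new_mask);
	task_rq_unlock(rq, p, &flags);

	return 0;
}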
