diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2015-05-15 11:43:36 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-08-12 06:06:10 -0400 |
| commit | 6c37067e27867db172b988cc11b9ff921175dee5 (patch) | |
| tree | b9e332602d82844938ba55514c93125f80c240c1 /kernel/sched/rt.c | |
| parent | c5b2803840817115e9b568d5054e5007ae36176b (diff) | |
sched: Change the sched_class::set_cpus_allowed() calling context
Change the calling context of sched_class::set_cpus_allowed() such
that we can assume the task is inactive.
This allows us to easily make changes that affect accounting done by
enqueue/dequeue. This does in fact completely remove
set_cpus_allowed_rt() and greatly reduces set_cpus_allowed_dl().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dedekind1@gmail.com
Cc: juri.lelli@arm.com
Cc: mgorman@suse.de
Cc: riel@redhat.com
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20150515154833.667516139@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/rt.c')
| -rw-r--r-- | kernel/sched/rt.c | 45 |
1 file changed, 1 insertion(+), 44 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 63692efeca82..d2ea59364a1c 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -2076,49 +2076,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
| 2076 | push_rt_tasks(rq); | 2076 | push_rt_tasks(rq); |
| 2077 | } | 2077 | } |
| 2078 | 2078 | ||
| 2079 | static void set_cpus_allowed_rt(struct task_struct *p, | ||
| 2080 | const struct cpumask *new_mask) | ||
| 2081 | { | ||
| 2082 | struct rq *rq; | ||
| 2083 | int weight; | ||
| 2084 | |||
| 2085 | BUG_ON(!rt_task(p)); | ||
| 2086 | |||
| 2087 | weight = cpumask_weight(new_mask); | ||
| 2088 | |||
| 2089 | /* | ||
| 2090 | * Only update if the process changes its state from whether it | ||
| 2091 | * can migrate or not. | ||
| 2092 | */ | ||
| 2093 | if ((p->nr_cpus_allowed > 1) == (weight > 1)) | ||
| 2094 | goto done; | ||
| 2095 | |||
| 2096 | if (!task_on_rq_queued(p)) | ||
| 2097 | goto done; | ||
| 2098 | |||
| 2099 | rq = task_rq(p); | ||
| 2100 | |||
| 2101 | /* | ||
| 2102 | * The process used to be able to migrate OR it can now migrate | ||
| 2103 | */ | ||
| 2104 | if (weight <= 1) { | ||
| 2105 | if (!task_current(rq, p)) | ||
| 2106 | dequeue_pushable_task(rq, p); | ||
| 2107 | BUG_ON(!rq->rt.rt_nr_migratory); | ||
| 2108 | rq->rt.rt_nr_migratory--; | ||
| 2109 | } else { | ||
| 2110 | if (!task_current(rq, p)) | ||
| 2111 | enqueue_pushable_task(rq, p); | ||
| 2112 | rq->rt.rt_nr_migratory++; | ||
| 2113 | } | ||
| 2114 | |||
| 2115 | update_rt_migration(&rq->rt); | ||
| 2116 | |||
| 2117 | done: | ||
| 2118 | cpumask_copy(&p->cpus_allowed, new_mask); | ||
| 2119 | p->nr_cpus_allowed = weight; | ||
| 2120 | } | ||
| 2121 | |||
| 2122 | /* Assumes rq->lock is held */ | 2079 | /* Assumes rq->lock is held */ |
| 2123 | static void rq_online_rt(struct rq *rq) | 2080 | static void rq_online_rt(struct rq *rq) |
| 2124 | { | 2081 | { |
| @@ -2327,7 +2284,7 @@ const struct sched_class rt_sched_class = { | |||
| 2327 | #ifdef CONFIG_SMP | 2284 | #ifdef CONFIG_SMP |
| 2328 | .select_task_rq = select_task_rq_rt, | 2285 | .select_task_rq = select_task_rq_rt, |
| 2329 | 2286 | ||
| 2330 | .set_cpus_allowed = set_cpus_allowed_rt, | 2287 | .set_cpus_allowed = set_cpus_allowed_common, |
| 2331 | .rq_online = rq_online_rt, | 2288 | .rq_online = rq_online_rt, |
| 2332 | .rq_offline = rq_offline_rt, | 2289 | .rq_offline = rq_offline_rt, |
| 2333 | .task_woken = task_woken_rt, | 2290 | .task_woken = task_woken_rt, |
