summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorMathieu Poirier <mathieu.poirier@linaro.org>2019-07-19 09:59:55 -0400
committerIngo Molnar <mingo@kernel.org>2019-07-25 09:55:01 -0400
commitf9a25f776d780bfa3279f0b6e5f5cf3224997976 (patch)
tree8b1fa082ea0a57b11fbee5fc91e208442fcafd0f /kernel/sched
parent4b211f2b129dd1f6a6956bbc76e2f232c1ec3ad8 (diff)
cpusets: Rebuild root domain deadline accounting information
When the topology of root domains is modified by CPUset or CPUhotplug operations, information about the current deadline bandwidth held in the root domain is lost.

This patch addresses the issue by recalculating the lost deadline bandwidth information by circling through the deadline tasks held in CPUsets and adding their current load to the root domain they are associated with.

Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
[ Various additional modifications. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bristot@redhat.com
Cc: claudio@evidence.eu.com
Cc: lizefan@huawei.com
Cc: longman@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: rostedt@goodmis.org
Cc: tj@kernel.org
Cc: tommaso.cucinotta@santannapisa.it
Link: https://lkml.kernel.org/r/20190719140000.31694-4-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/deadline.c30
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/sched/topology.c13
3 files changed, 42 insertions, 4 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ef5b9f6b1d42..0f9d2180be23 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2283,6 +2283,36 @@ void __init init_sched_dl_class(void)
2283 GFP_KERNEL, cpu_to_node(i)); 2283 GFP_KERNEL, cpu_to_node(i));
2284} 2284}
2285 2285
2286void dl_add_task_root_domain(struct task_struct *p)
2287{
2288 struct rq_flags rf;
2289 struct rq *rq;
2290 struct dl_bw *dl_b;
2291
2292 rq = task_rq_lock(p, &rf);
2293 if (!dl_task(p))
2294 goto unlock;
2295
2296 dl_b = &rq->rd->dl_bw;
2297 raw_spin_lock(&dl_b->lock);
2298
2299 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2300
2301 raw_spin_unlock(&dl_b->lock);
2302
2303unlock:
2304 task_rq_unlock(rq, p, &rf);
2305}
2306
2307void dl_clear_root_domain(struct root_domain *rd)
2308{
2309 unsigned long flags;
2310
2311 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2312 rd->dl_bw.total_bw = 0;
2313 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2314}
2315
2286#endif /* CONFIG_SMP */ 2316#endif /* CONFIG_SMP */
2287 2317
2288static void switched_from_dl(struct rq *rq, struct task_struct *p) 2318static void switched_from_dl(struct rq *rq, struct task_struct *p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 16126efd14ed..7583faddba33 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -778,9 +778,6 @@ struct root_domain {
778 struct perf_domain __rcu *pd; 778 struct perf_domain __rcu *pd;
779}; 779};
780 780
781extern struct root_domain def_root_domain;
782extern struct mutex sched_domains_mutex;
783
784extern void init_defrootdomain(void); 781extern void init_defrootdomain(void);
785extern int sched_init_domains(const struct cpumask *cpu_map); 782extern int sched_init_domains(const struct cpumask *cpu_map);
786extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 783extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5a174ae6ecf3..8f83e8e3ea9a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2203,8 +2203,19 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2203 for (i = 0; i < ndoms_cur; i++) { 2203 for (i = 0; i < ndoms_cur; i++) {
2204 for (j = 0; j < n && !new_topology; j++) { 2204 for (j = 0; j < n && !new_topology; j++) {
2205 if (cpumask_equal(doms_cur[i], doms_new[j]) && 2205 if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2206 dattrs_equal(dattr_cur, i, dattr_new, j)) 2206 dattrs_equal(dattr_cur, i, dattr_new, j)) {
2207 struct root_domain *rd;
2208
2209 /*
2210 * This domain won't be destroyed and as such
2211 * its dl_bw->total_bw needs to be cleared. It
2212 * will be recomputed in function
2213 * update_tasks_root_domain().
2214 */
2215 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2216 dl_clear_root_domain(rd);
2207 goto match1; 2217 goto match1;
2218 }
2208 } 2219 }
2209 /* No match - a current sched domain not in new doms_new[] */ 2220 /* No match - a current sched domain not in new doms_new[] */
2210 detach_destroy_domains(doms_cur[i]); 2221 detach_destroy_domains(doms_cur[i]);