path: root/kernel/sched/core.c
author	Patrick Bellasi <patrick.bellasi@arm.com>	2019-08-22 09:28:10 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-09-03 03:17:40 -0400
commit	babbe170e053c6ec2343751749995b7b9fd5fd2c
tree	12bfc53f330e70512f0a83023ff0ec1fba8c29c9 /kernel/sched/core.c
parent	3eac870a324728e5d17118888840dad70bcd37f3
sched/uclamp: Update CPU's refcount on TG's clamp changes
On updates of task group (TG) clamp values, ensure that these new values
are enforced on all RUNNABLE tasks of the task group, i.e. all RUNNABLE
tasks are immediately boosted and/or capped as requested.

Do that each time we update effective clamps from cpu_util_update_eff().
Use the *cgroup_subsys_state (css) to walk the list of tasks in each
affected TG and update their RUNNABLE tasks.
Update each task by using the same mechanism used for cpu affinity masks
updates, i.e. by taking the rq lock.

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Michal Koutny <mkoutny@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Alessio Balsini <balsini@android.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Steve Muckle <smuckle@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Todd Kjos <tkjos@google.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lkml.kernel.org/r/20190822132811.31294-6-patrick.bellasi@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
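[Editor's note] The update path this patch touches is reached from userspace by writing a task group's clamp attributes through the cgroup cpu controller (the cpu.uclamp.min / cpu.uclamp.max files added earlier in this series, where enabled). The sketch below is a minimal, hypothetical userspace trigger for that path; the cgroup v2 mount point, the group name and the chosen value are assumptions for illustration only, not part of this commit.

/*
 * Hypothetical trigger for cpu_util_update_eff(): write a new clamp value
 * into an existing cgroup's cpu.uclamp.max file. Paths and the group name
 * ("background") are assumed for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/background/cpu.uclamp.max";
	const char *value = "50\n";	/* requested max utilization, as a percentage */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, value, strlen(value)) < 0)
		perror("write");
	close(fd);
	return 0;
}

With this patch applied, such a write is expected to boost/cap the group's already RUNNABLE tasks immediately, instead of only at their next enqueue.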
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 55
 1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c32ac071c203..55a1c07045ff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1043,6 +1043,54 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
 		uclamp_rq_dec_id(rq, p, clamp_id);
 }
 
+static inline void
+uclamp_update_active(struct task_struct *p, unsigned int clamp_id)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	/*
+	 * Lock the task and the rq where the task is (or was) queued.
+	 *
+	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
+	 * price to pay to safely serialize util_{min,max} updates with
+	 * enqueues, dequeues and migration operations.
+	 * This is the same locking schema used by __set_cpus_allowed_ptr().
+	 */
+	rq = task_rq_lock(p, &rf);
+
+	/*
+	 * Setting the clamp bucket is serialized by task_rq_lock().
+	 * If the task is not yet RUNNABLE and its task_struct is not
+	 * affecting a valid clamp bucket, the next time it's enqueued,
+	 * it will already see the updated clamp bucket value.
+	 */
+	if (p->uclamp[clamp_id].active) {
+		uclamp_rq_dec_id(rq, p, clamp_id);
+		uclamp_rq_inc_id(rq, p, clamp_id);
+	}
+
+	task_rq_unlock(rq, p, &rf);
+}
+
+static inline void
+uclamp_update_active_tasks(struct cgroup_subsys_state *css,
+			   unsigned int clamps)
+{
+	struct css_task_iter it;
+	struct task_struct *p;
+	unsigned int clamp_id;
+
+	css_task_iter_start(css, 0, &it);
+	while ((p = css_task_iter_next(&it))) {
+		for_each_clamp_id(clamp_id) {
+			if ((0x1 << clamp_id) & clamps)
+				uclamp_update_active(p, clamp_id);
+		}
+	}
+	css_task_iter_end(&it);
+}
+
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
 static void uclamp_update_root_tg(void)
@@ -7160,8 +7208,13 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
 			clamps |= (0x1 << clamp_id);
 		}
-		if (!clamps)
+		if (!clamps) {
 			css = css_rightmost_descendant(css);
+			continue;
+		}
+
+		/* Immediately update descendant RUNNABLE tasks */
+		uclamp_update_active_tasks(css, clamps);
 	}
 }
 
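[Editor's note] On the clamps bitmask shared by the two hunks above: cpu_util_update_eff() sets one bit per clamp index whose effective value actually changed, and uclamp_update_active_tasks() refreshes RUNNABLE tasks only for those indexes. The following is a minimal standalone model of that protocol, not kernel code; UCLAMP_MIN/UCLAMP_MAX/UCLAMP_CNT mirror the kernel's clamp indexes.

/*
 * Standalone model of the clamp-change bitmask: set a bit when an index's
 * effective value changed, then act only on the indexes whose bit is set.
 */
#include <stdio.h>

enum uclamp_id { UCLAMP_MIN, UCLAMP_MAX, UCLAMP_CNT };

int main(void)
{
	unsigned int clamps = 0;
	unsigned int clamp_id;

	/* Pretend only the effective UCLAMP_MAX value changed. */
	clamps |= (0x1 << UCLAMP_MAX);

	for (clamp_id = 0; clamp_id < UCLAMP_CNT; clamp_id++) {
		if ((0x1 << clamp_id) & clamps)
			printf("clamp_id %u changed: refresh RUNNABLE tasks\n",
			       clamp_id);
	}
	return 0;
}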