author     Vikas Shivappa <vikas.shivappa@linux.intel.com>   2017-07-25 17:14:36 -0400
committer  Thomas Gleixner <tglx@linutronix.de>              2017-08-01 16:41:25 -0400
commit     a9fcf8627dc01049c390023bbb0323db3c785b91 (patch)
tree       d9f3e563b580313cd5ddf6685bb600ee05c2e347
parent     b09d981b3f346690dafa3e4ebedfcf3e44b68e83 (diff)
x86/intel_rdt/cqm: Add cpus file support
The cpus file is extended to support resource monitoring. This is used to override the RMID of the default group when running on specific CPUs. It works similarly to resource control. The "cpus" and "cpus_list" files are present in the default group, in ctrl_mon groups and in monitor groups.

Each "cpus" or "cpus_list" file reads as a cpumask or list showing which CPUs belong to the resource group. By default all online CPUs belong to the default root group. A CPU can be present in one "ctrl_mon" and one "monitor" group simultaneously. CPUs are added to a resource group by writing them to the file. When a CPU is added to a ctrl_mon group it is automatically removed from its previous ctrl_mon group. A CPU can be added to a monitor group only if it is present in the parent ctrl_mon group, and when a CPU is added to a monitor group it is automatically removed from its previous monitor group.

When CPUs go offline, they are automatically removed from the ctrl_mon and monitor groups.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-18-git-send-email-vikas.shivappa@linux.intel.com
-rw-r--r--   arch/x86/kernel/cpu/intel_rdt_rdtgroup.c   110
 1 file changed, 93 insertions(+), 17 deletions(-)
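A minimal sketch of how the interface described above might be driven from user space, not part of the patch itself. It assumes resctrl is mounted at /sys/fs/resctrl and that a ctrl_mon group "p0" and a monitor group "p0/mon_groups/m0" (both hypothetical names) have already been created with mkdir.

#include <stdio.h>

/* Write a cpu list string to a group's cpus_list file. */
static int write_cpus_list(const char *group, const char *cpus)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/resctrl/%s/cpus_list", group);
        f = fopen(path, "w");
        if (!f)
                return -1;
        if (fprintf(f, "%s\n", cpus) < 0) {
                fclose(f);
                return -1;
        }
        return fclose(f);
}

/* Print a group's current cpus_list so the push/pull effect is visible. */
static void show_cpus_list(const char *group)
{
        char path[256], buf[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/resctrl/%s/cpus_list", group);
        f = fopen(path, "r");
        if (!f)
                return;
        if (fgets(buf, sizeof(buf), f))
                printf("%-24s %s", group, buf);
        fclose(f);
}

int main(void)
{
        /* Give CPUs 2-3 to the ctrl_mon group; they leave their old group. */
        if (write_cpus_list("p0", "2-3"))
                perror("write p0/cpus_list");

        /*
         * A monitor group may only take CPUs already owned by its parent
         * ctrl_mon group, so this succeeds only because p0 now owns CPU 2.
         */
        if (write_cpus_list("p0/mon_groups/m0", "2"))
                perror("write m0/cpus_list");

        show_cpus_list(".");                    /* default group */
        show_cpus_list("p0");
        show_cpus_list("p0/mon_groups/m0");
        return 0;
}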
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index d11f4629a702..89457bba457a 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -181,15 +181,17 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 /*
  * This is safe against intel_rdt_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
- * from update_closid() is proteced against __switch_to() because
+ * from update_closid_rmid() is proteced against __switch_to() because
  * preemption is disabled.
  */
-static void update_cpu_closid(void *info)
+static void update_cpu_closid_rmid(void *info)
 {
         struct rdtgroup *r = info;
 
-        if (r)
+        if (r) {
                 this_cpu_write(rdt_cpu_default.closid, r->closid);
+                this_cpu_write(rdt_cpu_default.rmid, r->mon.rmid);
+        }
 
         /*
          * We cannot unconditionally write the MSR because the current
@@ -205,20 +207,72 @@ static void update_cpu_closid(void *info)
  * Per task closids/rmids must have been set up before calling this function.
  */
 static void
-update_closid(const struct cpumask *cpu_mask, struct rdtgroup *r)
+update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
 {
         int cpu = get_cpu();
 
         if (cpumask_test_cpu(cpu, cpu_mask))
-                update_cpu_closid(r);
-        smp_call_function_many(cpu_mask, update_cpu_closid, r, 1);
+                update_cpu_closid_rmid(r);
+        smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
         put_cpu();
 }
 
+static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
+                          cpumask_var_t tmpmask)
+{
+        struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
+        struct list_head *head;
+
+        /* Check whether cpus belong to parent ctrl group */
+        cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
+        if (cpumask_weight(tmpmask))
+                return -EINVAL;
+
+        /* Check whether cpus are dropped from this group */
+        cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+        if (cpumask_weight(tmpmask)) {
+                /* Give any dropped cpus to parent rdtgroup */
+                cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
+                update_closid_rmid(tmpmask, prgrp);
+        }
+
+        /*
+         * If we added cpus, remove them from previous group that owned them
+         * and update per-cpu rmid
+         */
+        cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+        if (cpumask_weight(tmpmask)) {
+                head = &prgrp->mon.crdtgrp_list;
+                list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+                        if (crgrp == rdtgrp)
+                                continue;
+                        cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
+                                       tmpmask);
+                }
+                update_closid_rmid(tmpmask, rdtgrp);
+        }
+
+        /* Done pushing/pulling - update this group with new mask */
+        cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+        return 0;
+}
+
+static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
+{
+        struct rdtgroup *crgrp;
+
+        cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
+        /* update the child mon group masks as well*/
+        list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
+                cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
+}
+
 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
-                           cpumask_var_t tmpmask)
+                           cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
 {
-        struct rdtgroup *r;
+        struct rdtgroup *r, *crgrp;
+        struct list_head *head;
 
         /* Check whether cpus are dropped from this group */
         cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
@@ -230,33 +284,47 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
                 /* Give any dropped cpus to rdtgroup_default */
                 cpumask_or(&rdtgroup_default.cpu_mask,
                            &rdtgroup_default.cpu_mask, tmpmask);
-                update_closid(tmpmask, &rdtgroup_default);
+                update_closid_rmid(tmpmask, &rdtgroup_default);
         }
 
         /*
-         * If we added cpus, remove them from previous group that owned them
-         * and update per-cpu closid
+         * If we added cpus, remove them from previous group and
+         * the prev group's child groups that owned them
+         * and update per-cpu closid/rmid.
          */
         cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
         if (cpumask_weight(tmpmask)) {
                 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
                         if (r == rdtgrp)
                                 continue;
-                        cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
+                        cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
+                        if (cpumask_weight(tmpmask1))
+                                cpumask_rdtgrp_clear(r, tmpmask1);
                 }
-                update_closid(tmpmask, rdtgrp);
+                update_closid_rmid(tmpmask, rdtgrp);
         }
 
         /* Done pushing/pulling - update this group with new mask */
         cpumask_copy(&rdtgrp->cpu_mask, newmask);
 
+        /*
+         * Clear child mon group masks since there is a new parent mask
+         * now and update the rmid for the cpus the child lost.
+         */
+        head = &rdtgrp->mon.crdtgrp_list;
+        list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+                cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
+                update_closid_rmid(tmpmask, rdtgrp);
+                cpumask_clear(&crgrp->cpu_mask);
+        }
+
         return 0;
 }
 
 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
 {
-        cpumask_var_t tmpmask, newmask;
+        cpumask_var_t tmpmask, newmask, tmpmask1;
         struct rdtgroup *rdtgrp;
         int ret;
 
@@ -269,6 +337,11 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
                 free_cpumask_var(tmpmask);
                 return -ENOMEM;
         }
+        if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
+                free_cpumask_var(tmpmask);
+                free_cpumask_var(newmask);
+                return -ENOMEM;
+        }
 
         rdtgrp = rdtgroup_kn_lock_live(of->kn);
         if (!rdtgrp) {
@@ -292,7 +365,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
         }
 
         if (rdtgrp->type == RDTCTRL_GROUP)
-                ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask);
+                ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
+        else if (rdtgrp->type == RDTMON_GROUP)
+                ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
         else
                 ret = -EINVAL;
 
@@ -300,6 +375,7 @@ unlock:
         rdtgroup_kn_unlock(of->kn);
         free_cpumask_var(tmpmask);
         free_cpumask_var(newmask);
+        free_cpumask_var(tmpmask1);
 
         return ret ?: nbytes;
 }
@@ -1113,7 +1189,7 @@ static void rmdir_all_sub(void)
         }
         /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
         get_online_cpus();
-        update_closid(cpu_online_mask, &rdtgroup_default);
+        update_closid_rmid(cpu_online_mask, &rdtgroup_default);
         put_online_cpus();
 
         kernfs_remove(kn_info);
@@ -1374,7 +1450,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
          * task running on them.
          */
         cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
-        update_closid(tmpmask, NULL);
+        update_closid_rmid(tmpmask, NULL);
 
         rdtgrp->flags = RDT_DELETED;
         closid_free(rdtgrp->closid);
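The monitor-group path added by cpus_mon_write() is the subtle part of the change: a monitor group may only own CPUs that its parent ctrl_mon group owns, CPUs dropped from it fall back to the parent, and CPUs added to it are pulled out of sibling monitor groups. Purely as an illustration of those push/pull semantics (not kernel code), here is a small user-space model in which plain uint64_t bitmasks stand in for struct cpumask and the group names p0/m0/m1 are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct group {
        const char *name;
        uint64_t cpus;
};

/* Returns -1 if newmask contains CPUs the parent ctrl_mon group does not own. */
static int mon_group_set_cpus(struct group *grp, struct group *parent,
                              struct group *siblings, int nsiblings,
                              uint64_t newmask)
{
        uint64_t dropped, added;
        int i;

        if (newmask & ~parent->cpus)
                return -1;                      /* outside parent's mask */

        dropped = grp->cpus & ~newmask;         /* CPUs this group loses */
        added = newmask & ~grp->cpus;           /* CPUs this group gains */

        /*
         * Dropped CPUs simply remain with the parent, which already owns
         * them; in the kernel, update_closid_rmid() re-programs their
         * per-cpu RMID at this point.
         */
        (void)dropped;

        /* Pull added CPUs out of every sibling monitor group. */
        for (i = 0; i < nsiblings; i++)
                if (&siblings[i] != grp)
                        siblings[i].cpus &= ~added;

        grp->cpus = newmask;
        return 0;
}

int main(void)
{
        struct group parent = { "p0", 0xf };                    /* CPUs 0-3 */
        struct group mons[] = { { "m0", 0x3 }, { "m1", 0xc } };

        /* Move CPU 2 (bit 2) from m1 into m0. */
        if (mon_group_set_cpus(&mons[0], &parent, mons, 2, 0x7))
                fprintf(stderr, "rejected\n");

        printf("%s=%#llx %s=%#llx %s=%#llx\n",
               parent.name, (unsigned long long)parent.cpus,
               mons[0].name, (unsigned long long)mons[0].cpus,
               mons[1].name, (unsigned long long)mons[1].cpus);
        return 0;
}

With the initial masks above, moving CPU 2 into m0 prints p0=0xf m0=0x7 m1=0x8, matching the automatic-removal behaviour the commit message describes.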