aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2016-11-11 20:02:37 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-11-15 12:35:50 -0500
commitc7cc0cc10cdecc275211c8749defba6c41aaf5de (patch)
tree351a83bdd34dcf688a43ea205a65074b07240d13
parent59fe5a77d473f3519dbee8ef5e77c69897a838f9 (diff)
x86/intel_rdt: Reset per cpu closids on unmount
All CPUs in a rdtgroup are given back to the default rdtgroup before the rdtgroup is removed during umount. After umount, the default rdtgroup contains all online CPUs, but the per cpu closids are not cleared. As a result the stale closid value will be used immediately after the next mount. Move all CPUs to the default group and update the per cpu closid storage. [ tglx: Massaged changelog ] Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com> Cc: "Tony Luck" <tony.luck@intel.com> Cc: "Sai Prakhya" <sai.praneeth.prakhya@intel.com> Cc: "Vikas Shivappa" <vikas.shivappa@linux.intel.com> Cc: "Ingo Molnar" <mingo@elte.hu> Cc: "H. Peter Anvin" <h.peter.anvin@intel.com> Link: http://lkml.kernel.org/r/1478912558-55514-2-git-send-email-fenghua.yu@intel.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c16
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 2f54931e0fa9..d6bad092f542 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -799,6 +799,7 @@ static void rmdir_all_sub(void)
799{ 799{
800 struct rdtgroup *rdtgrp, *tmp; 800 struct rdtgroup *rdtgrp, *tmp;
801 struct task_struct *p, *t; 801 struct task_struct *p, *t;
802 int cpu;
802 803
803 /* move all tasks to default resource group */ 804 /* move all tasks to default resource group */
804 read_lock(&tasklist_lock); 805 read_lock(&tasklist_lock);
@@ -813,14 +814,29 @@ static void rmdir_all_sub(void)
813 smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid, 814 smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid,
814 NULL, 1); 815 NULL, 1);
815 put_cpu(); 816 put_cpu();
817
816 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 818 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
817 /* Remove each rdtgroup other than root */ 819 /* Remove each rdtgroup other than root */
818 if (rdtgrp == &rdtgroup_default) 820 if (rdtgrp == &rdtgroup_default)
819 continue; 821 continue;
822
823 /*
824 * Give any CPUs back to the default group. We cannot copy
825 * cpu_online_mask because a CPU might have executed the
826 * offline callback already, but is still marked online.
827 */
828 cpumask_or(&rdtgroup_default.cpu_mask,
829 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
830
820 kernfs_remove(rdtgrp->kn); 831 kernfs_remove(rdtgrp->kn);
821 list_del(&rdtgrp->rdtgroup_list); 832 list_del(&rdtgrp->rdtgroup_list);
822 kfree(rdtgrp); 833 kfree(rdtgrp);
823 } 834 }
835
836 /* Reset all per cpu closids to the default value */
837 for_each_cpu(cpu, &rdtgroup_default.cpu_mask)
838 per_cpu(cpu_closid, cpu) = 0;
839
824 kernfs_remove(kn_info); 840 kernfs_remove(kn_info);
825} 841}
826 842