about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Fenghua Yu <fenghua.yu@intel.com>	2016-10-28 18:04:48 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-10-30 21:10:16 -0400
commit	4f341a5e48443fcc2e2d935ca990e462c02bb1a6 (patch)
tree	3aa1e0fd99f3e60c7a5645c67432ed90c80f6b47
parent	60ec2440c63dea88a5ef13e2b2549730a0d75a37 (diff)
x86/intel_rdt: Add scheduler hook
Hook the x86 scheduler code to update closid based on whether the current
task is assigned to a specific closid or running on a CPU assigned to a
specific closid.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com>
Cc: "Tony Luck" <tony.luck@intel.com>
Cc: "Shaohua Li" <shli@fb.com>
Cc: "Sai Prakhya" <sai.praneeth.prakhya@intel.com>
Cc: "Peter Zijlstra" <peterz@infradead.org>
Cc: "Stephane Eranian" <eranian@google.com>
Cc: "Dave Hansen" <dave.hansen@intel.com>
Cc: "David Carrillo-Cisneros" <davidcc@google.com>
Cc: "Nilay Vaish" <nilayvaish@gmail.com>
Cc: "Vikas Shivappa" <vikas.shivappa@linux.intel.com>
Cc: "Ingo Molnar" <mingo@elte.hu>
Cc: "Borislav Petkov" <bp@suse.de>
Cc: "H. Peter Anvin" <h.peter.anvin@intel.com>
Link: http://lkml.kernel.org/r/1477692289-37412-10-git-send-email-fenghua.yu@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/include/asm/intel_rdt.h	42
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt.c	1
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_rdtgroup.c	3
-rw-r--r--	arch/x86/kernel/process_32.c	4
-rw-r--r--	arch/x86/kernel/process_64.c	4
5 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 2e5eab09083e..5bc72a4dbd5e 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -1,8 +1,12 @@
1#ifndef _ASM_X86_INTEL_RDT_H 1#ifndef _ASM_X86_INTEL_RDT_H
2#define _ASM_X86_INTEL_RDT_H 2#define _ASM_X86_INTEL_RDT_H
3 3
4#ifdef CONFIG_INTEL_RDT_A
5
4#include <linux/jump_label.h> 6#include <linux/jump_label.h>
5 7
8#include <asm/intel_rdt_common.h>
9
6#define IA32_L3_QOS_CFG 0xc81 10#define IA32_L3_QOS_CFG 0xc81
7#define IA32_L3_CBM_BASE 0xc90 11#define IA32_L3_CBM_BASE 0xc90
8#define IA32_L2_CBM_BASE 0xd10 12#define IA32_L2_CBM_BASE 0xd10
@@ -176,4 +180,42 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
176 char *buf, size_t nbytes, loff_t off); 180 char *buf, size_t nbytes, loff_t off);
177int rdtgroup_schemata_show(struct kernfs_open_file *of, 181int rdtgroup_schemata_show(struct kernfs_open_file *of,
178 struct seq_file *s, void *v); 182 struct seq_file *s, void *v);
183
184/*
185 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
186 *
187 * Following considerations are made so that this has minimal impact
188 * on scheduler hot path:
189 * - This will stay as no-op unless we are running on an Intel SKU
190 * which supports resource control and we enable by mounting the
191 * resctrl file system.
192 * - Caches the per cpu CLOSid values and does the MSR write only
193 * when a task with a different CLOSid is scheduled in.
194 */
195static inline void intel_rdt_sched_in(void)
196{
197 if (static_branch_likely(&rdt_enable_key)) {
198 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
199 int closid;
200
201 /*
202 * If this task has a closid assigned, use it.
203 * Else use the closid assigned to this cpu.
204 */
205 closid = current->closid;
206 if (closid == 0)
207 closid = this_cpu_read(cpu_closid);
208
209 if (closid != state->closid) {
210 state->closid = closid;
211 wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
212 }
213 }
214}
215
216#else
217
218static inline void intel_rdt_sched_in(void) {}
219
220#endif /* CONFIG_INTEL_RDT_A */
179#endif /* _ASM_X86_INTEL_RDT_H */ 221#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 40094aed5f71..5a533fefefa0 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -29,7 +29,6 @@
29#include <linux/cacheinfo.h> 29#include <linux/cacheinfo.h>
30#include <linux/cpuhotplug.h> 30#include <linux/cpuhotplug.h>
31 31
32#include <asm/intel_rdt_common.h>
33#include <asm/intel-family.h> 32#include <asm/intel-family.h>
34#include <asm/intel_rdt.h> 33#include <asm/intel_rdt.h>
35 34
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 5c4bab9452b0..a90ad22b9823 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -292,6 +292,9 @@ static void move_myself(struct callback_head *head)
292 kfree(rdtgrp); 292 kfree(rdtgrp);
293 } 293 }
294 294
295 /* update PQR_ASSOC MSR to make resource group go into effect */
296 intel_rdt_sched_in();
297
295 kfree(callback); 298 kfree(callback);
296} 299}
297 300
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..efe7f9fce44e 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -54,6 +54,7 @@
54#include <asm/debugreg.h> 54#include <asm/debugreg.h>
55#include <asm/switch_to.h> 55#include <asm/switch_to.h>
56#include <asm/vm86.h> 56#include <asm/vm86.h>
57#include <asm/intel_rdt.h>
57 58
58void __show_regs(struct pt_regs *regs, int all) 59void __show_regs(struct pt_regs *regs, int all)
59{ 60{
@@ -299,5 +300,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
299 300
300 this_cpu_write(current_task, next_p); 301 this_cpu_write(current_task, next_p);
301 302
303 /* Load the Intel cache allocation PQR MSR. */
304 intel_rdt_sched_in();
305
302 return prev_p; 306 return prev_p;
303} 307}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b3760b3c1ca0..acd7d6f507af 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -50,6 +50,7 @@
50#include <asm/switch_to.h> 50#include <asm/switch_to.h>
51#include <asm/xen/hypervisor.h> 51#include <asm/xen/hypervisor.h>
52#include <asm/vdso.h> 52#include <asm/vdso.h>
53#include <asm/intel_rdt.h>
53 54
54__visible DEFINE_PER_CPU(unsigned long, rsp_scratch); 55__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
55 56
@@ -473,6 +474,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
473 loadsegment(ss, __KERNEL_DS); 474 loadsegment(ss, __KERNEL_DS);
474 } 475 }
475 476
477 /* Load the Intel cache allocation PQR MSR. */
478 intel_rdt_sched_in();
479
476 return prev_p; 480 return prev_p;
477} 481}
478 482