 Documentation/x86/resctrl_ui.txt (renamed from Documentation/x86/intel_rdt_ui.txt)                       |   9
 MAINTAINERS                                                                                              |   6
 arch/x86/Kconfig                                                                                         |  22
 arch/x86/include/asm/resctrl_sched.h (renamed from arch/x86/include/asm/intel_rdt_sched.h)               |  28
 arch/x86/kernel/cpu/Makefile                                                                             |   5
 arch/x86/kernel/cpu/resctrl/Makefile                                                                     |   4
 arch/x86/kernel/cpu/resctrl/core.c (renamed from arch/x86/kernel/cpu/intel_rdt.c)                        | 186
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c (renamed from arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c)     | 107
 arch/x86/kernel/cpu/resctrl/internal.h (renamed from arch/x86/kernel/cpu/intel_rdt.h)                    |  55
 arch/x86/kernel/cpu/resctrl/monitor.c (renamed from arch/x86/kernel/cpu/intel_rdt_monitor.c)             |  16
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c)     |  40
 arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h) | 2
 arch/x86/kernel/cpu/resctrl/rdtgroup.c (renamed from arch/x86/kernel/cpu/intel_rdt_rdtgroup.c)           |  61
 arch/x86/kernel/cpu/scattered.c                                                                          |   7
 arch/x86/kernel/process_32.c                                                                             |   4
 arch/x86/kernel/process_64.c                                                                             |   4
 include/linux/sched.h                                                                                    |   2
 17 files changed, 385 insertions(+), 173 deletions(-)
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/resctrl_ui.txt
index 52b10945ff75..d9aed8303984 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -1,4 +1,7 @@
-User Interface for Resource Allocation in Intel Resource Director Technology
+User Interface for Resource Control feature
+
+Intel refers to this feature as Intel Resource Director Technology(Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service(AMD QoS).
 
 Copyright (C) 2016 Intel Corporation
 
@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
 Tony Luck <tony.luck@intel.com>
 Vikas Shivappa <vikas.shivappa@intel.com>
 
-This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits:
+This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+flag bits:
 RDT (Resource Director Technology) Allocation - "rdt_a"
 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
 CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
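[Editor's note] A quick way to confirm these flag bits on a given machine is to scan /proc/cpuinfo from userspace before mounting resctrl. The flag names below come from the documentation hunk above; the scanner itself is a stand-alone illustration, not kernel code, and its naive substring match can false-positive on longer flag names:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                const char *flags[] = { "rdt_a", "cat_l3", "cat_l2",
                                        "cdp_l3", "cdp_l2" };
                char line[8192];
                FILE *f = fopen("/proc/cpuinfo", "r");

                if (!f)
                        return 1;
                while (fgets(line, sizeof(line), f)) {
                        /* The "flags" line lists every feature bit. */
                        if (strncmp(line, "flags", 5))
                                continue;
                        for (unsigned int i = 0; i < sizeof(flags) / sizeof(*flags); i++)
                                printf("%-7s %s\n", flags[i],
                                       strstr(line, flags[i]) ? "present" : "absent");
                        break;
                }
                fclose(f);
                return 0;
        }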
diff --git a/MAINTAINERS b/MAINTAINERS
index c4665d49dc50..dd08e7018b17 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12717,9 +12717,9 @@ M: Fenghua Yu <fenghua.yu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	arch/x86/kernel/cpu/intel_rdt*
-F:	arch/x86/include/asm/intel_rdt_sched.h
-F:	Documentation/x86/intel_rdt*
+F:	arch/x86/kernel/cpu/resctrl/
+F:	arch/x86/include/asm/resctrl_sched.h
+F:	Documentation/x86/resctrl*
 
 READ-COPY UPDATE (RCU)
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2a22a74abee..c7094f813183 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,15 +444,23 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-config INTEL_RDT
-	bool "Intel Resource Director Technology support"
-	depends on X86 && CPU_SUP_INTEL
+config RESCTRL
+	bool "Resource Control support"
+	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
 	select KERNFS
 	help
-	  Select to enable resource allocation and monitoring which are
-	  sub-features of Intel Resource Director Technology(RDT). More
-	  information about RDT can be found in the Intel x86
-	  Architecture Software Developer Manual.
+	  Enable Resource Control support.
+
+	  Provide support for the allocation and monitoring of system resources
+	  usage by the CPU.
+
+	  Intel calls this Intel Resource Director Technology
+	  (Intel(R) RDT). More information about RDT can be found in the
+	  Intel x86 Architecture Software Developer Manual.
+
+	  AMD calls this AMD Platform Quality of Service (AMD QoS).
+	  More information about AMD QoS can be found in the AMD64 Technology
+	  Platform Quality of Service Extensions manual.
 
 	  Say N if unsure.
 
diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 9acb06b6f81e..54990fe2a3ae 100644
--- a/arch/x86/include/asm/intel_rdt_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
 
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
 
 #include <linux/sched.h>
 #include <linux/jump_label.h>
@@ -10,7 +10,7 @@
 #define IA32_PQR_ASSOC	0x0c8f
 
 /**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
  * @cur_closid:	The cached Class Of Service ID
  * @default_rmid:	The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct resctrl_pqr_state {
 	u32			cur_rmid;
 	u32			cur_closid;
 	u32			default_rmid;
 	u32			default_closid;
 };
 
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  *   simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
 
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
 
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__resctrl_sched_in();
 }
 
 #else
 
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
 
-#endif /* CONFIG_INTEL_RDT */
+#endif /* CONFIG_RESCTRL */
 
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
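[Editor's note] The per-CPU state cache documented above is what keeps this hook cheap on the scheduler hot path: the IA32_PQR_ASSOC write only happens when the incoming task's CLOSID or RMID differs from what the CPU already carries. A minimal userspace sketch of that caching pattern; wrmsr_pqr() is a hypothetical stand-in for the real wrmsr(), and the whole program is illustrative:

        #include <stdio.h>
        #include <stdint.h>

        struct pqr_state {
                uint32_t cur_rmid;
                uint32_t cur_closid;
        };

        /* Stand-in for wrmsr(IA32_PQR_ASSOC, rmid, closid). */
        static void wrmsr_pqr(uint32_t rmid, uint32_t closid)
        {
                printf("MSR write: rmid=%u closid=%u\n", rmid, closid);
        }

        static void sched_in(struct pqr_state *s, uint32_t rmid, uint32_t closid)
        {
                /* Skip the expensive MSR write if nothing changed. */
                if (s->cur_rmid == rmid && s->cur_closid == closid)
                        return;
                s->cur_rmid = rmid;
                s->cur_closid = closid;
                wrmsr_pqr(rmid, closid);
        }

        int main(void)
        {
                struct pqr_state s = { 0, 0 };

                sched_in(&s, 1, 2);     /* writes the MSR */
                sched_in(&s, 1, 2);     /* cached, no write */
                sched_in(&s, 3, 2);     /* RMID changed, writes again */
                return 0;
        }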
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1f5d2291c31e..dc4acaa1549d 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
-
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 obj-$(CONFIG_MICROCODE)			+= microcode/
+obj-$(CONFIG_RESCTRL)			+= resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
 
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
new file mode 100644
index 000000000000..6895049ceef7
--- /dev/null
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL)	+= core.o rdtgroup.o monitor.o
+obj-$(CONFIG_RESCTRL)	+= ctrlmondata.o pseudo_lock.o
+CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/resctrl/core.c
index 44272b7107ad..c3a9dc63edf2 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
  * Software Developer Manual June 2016, volume 3, section 17.17.
  */
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	"resctrl: " fmt
 
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -30,22 +30,19 @@
 #include <linux/cpuhotplug.h>
 
 #include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
-
-#define MBA_IS_LINEAR		0x4
-#define MBA_MAX_MBPS		U32_MAX
+#include <asm/resctrl_sched.h>
+#include "internal.h"
 
 /* Mutex to protect rdtgroup access. */
 DEFINE_MUTEX(rdtgroup_mutex);
 
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 /*
  * Used to store the max resource name width and max resource data width
@@ -60,9 +57,13 @@ int max_name_width, max_data_width;
 bool rdt_alloc_capable;
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r);
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+	      struct rdt_resource *r);
 
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
 
@@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3,
 		.name			= "L3",
 		.domains		= domain_init(RDT_RESOURCE_L3),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3DATA,
 		.name			= "L3DATA",
 		.domains		= domain_init(RDT_RESOURCE_L3DATA),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L3CODE,
 		.name			= "L3CODE",
 		.domains		= domain_init(RDT_RESOURCE_L3CODE),
-		.msr_base		= IA32_L3_CBM_BASE,
+		.msr_base		= MSR_IA32_L3_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 3,
 		.cache = {
@@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2,
 		.name			= "L2",
 		.domains		= domain_init(RDT_RESOURCE_L2),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2DATA,
 		.name			= "L2DATA",
 		.domains		= domain_init(RDT_RESOURCE_L2DATA),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_L2CODE,
 		.name			= "L2CODE",
 		.domains		= domain_init(RDT_RESOURCE_L2CODE),
-		.msr_base		= IA32_L2_CBM_BASE,
+		.msr_base		= MSR_IA32_L2_CBM_BASE,
 		.msr_update		= cat_wrmsr,
 		.cache_level		= 2,
 		.cache = {
@@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid			= RDT_RESOURCE_MBA,
 		.name			= "MB",
 		.domains		= domain_init(RDT_RESOURCE_MBA),
-		.msr_base		= IA32_MBA_THRTL_BASE,
-		.msr_update		= mba_wrmsr,
 		.cache_level		= 3,
-		.parse_ctrlval		= parse_bw,
 		.format_str		= "%d=%*u",
 		.fflags			= RFTYPE_RES_MB,
 	},
@@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void)
 	struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
 	u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
-	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
 		return;
-	rdmsr(IA32_L3_CBM_BASE, l, h);
+
+	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
 
 	/* If all the bits were set in MSR, return success */
 	if (l != max_cbm)
@@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 	return false;
 }
 
-static bool rdt_get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
 {
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
@@ -285,6 +284,30 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
 	return true;
 }
 
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->default_ctrl = MAX_MBA_BW_AMD;
+
+	/* AMD does not use delay */
+	r->membw.delay_linear = false;
+
+	r->membw.min_bw = 0;
+	r->membw.bw_gran = 1;
+	/* Max value is 2048, Data width should be 4 in decimal */
+	r->data_width = 4;
+
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
+
+	return true;
+}
+
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	unsigned int i;
+
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
 /*
  * Map the memory b/w percentage value to delay values
  * that can be written to QOS_MSRs.
@@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 }
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r)
 {
 	unsigned int i;
 
@@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	struct list_head *l;
 
 	if (id < 0)
-		return ERR_PTR(id);
+		return ERR_PTR(-ENODEV);
 
 	list_for_each(l, &r->domains) {
 		d = list_entry(l, struct rdt_domain, list);
@@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
@@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
 
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag)
 	return ret;
 }
 
+static __init bool get_mem_config(void)
+{
+	if (!rdt_cpu_has(X86_FEATURE_MBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+
+	return false;
+}
+
 static __init bool get_rdt_alloc_resources(void)
 {
 	bool ret = false;
@@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void)
 		ret = true;
 	}
 
-	if (rdt_cpu_has(X86_FEATURE_MBA)) {
-		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
-			ret = true;
-	}
+	if (get_mem_config())
+		ret = true;
+
 	return ret;
 }
 
@@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void)
 	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
 }
 
-static __init void rdt_quirks(void)
+static __init void __check_quirks_intel(void)
 {
 	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_HASWELL_X:
@@ -855,30 +900,91 @@ static __init void rdt_quirks(void)
 	}
 }
 
+static __init void check_quirks(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		__check_quirks_intel();
+}
+
 static __init bool get_rdt_resources(void)
 {
-	rdt_quirks();
 	rdt_alloc_capable = get_rdt_alloc_resources();
 	rdt_mon_capable = get_rdt_mon_resources();
 
 	return (rdt_mon_capable || rdt_alloc_capable);
 }
 
+static __init void rdt_init_res_defs_intel(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_intel;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_THRTL_BASE;
+			r->msr_update = mba_wrmsr_intel;
+			r->parse_ctrlval = parse_bw_intel;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs_amd(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_amd;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_BW_BASE;
+			r->msr_update = mba_wrmsr_amd;
+			r->parse_ctrlval = parse_bw_amd;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		rdt_init_res_defs_intel();
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		rdt_init_res_defs_amd();
+}
+
 static enum cpuhp_state rdt_online;
 
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
 
+	/*
+	 * Initialize functions(or definitions) that are different
+	 * between vendors here.
+	 */
+	rdt_init_res_defs();
+
+	check_quirks();
+
 	if (!get_rdt_resources())
 		return -ENODEV;
 
 	rdt_init_padding();
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  "x86/resctrl/cat:online:",
+				  resctrl_online_cpu, resctrl_offline_cpu);
 	if (state < 0)
 		return state;
 
@@ -890,20 +996,20 @@ static int __init intel_rdt_late_init(void)
 	rdt_online = state;
 
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("%s allocation detected\n", r->name);
 
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
+		pr_info("%s monitoring detected\n", r->name);
 
 	return 0;
 }
 
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
 
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
 
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
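[Editor's note] The split above leaves the two vendors with different MBA encodings: mba_wrmsr_intel() goes through delay_bw_map() to turn a bandwidth percentage into a throttle value, while mba_wrmsr_amd() writes the user's control value unmodified. A stand-alone sketch of the contrast; the linear formula (delay = MAX_MBA_BW - bw) is an assumption consistent with the linear delay map described above, not a quote of this patch:

        #include <stdio.h>

        #define MAX_MBA_BW      100u    /* Intel: bandwidth is a percentage */
        #define MAX_MBA_BW_AMD  0x800u  /* AMD: absolute value, max 2048 */

        /* Assumed linear map: larger delay value = less bandwidth. */
        static unsigned int mba_msr_val_intel(unsigned int bw_percent)
        {
                return MAX_MBA_BW - bw_percent;
        }

        /* AMD does not use delay values; the control is written as-is. */
        static unsigned int mba_msr_val_amd(unsigned int bw)
        {
                return bw > MAX_MBA_BW_AMD ? MAX_MBA_BW_AMD : bw;
        }

        int main(void)
        {
                printf("Intel 70%% -> throttle delay %u\n", mba_msr_val_intel(70));
                printf("AMD 2048  -> control value %u\n", mba_msr_val_amd(2048));
                return 0;
        }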
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index efa4a519f5e5..2dbd990a2eb7 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -27,7 +27,54 @@
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "intel_rdt.h"
+#include "internal.h"
+
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+			    struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d)
+{
+	unsigned long bw_val;
+
+	if (d->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate_amd(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	d->new_ctrl = bw_val;
+	d->have_new_ctrl = true;
+
+	return 0;
+}
 
 /*
  * Check whether MBA bandwidth percentage value is correct. The value is
@@ -65,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
 	return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
-	     struct rdt_domain *d)
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+		   struct rdt_domain *d)
 {
 	unsigned long bw_val;
 
 	if (d->have_new_ctrl) {
-		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
 		return -EINVAL;
 	}
 
@@ -89,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
  * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
  * Additionally Haswell requires at least two bits set.
  */
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 {
 	unsigned long first_bit, zero_bit, val;
 	unsigned int cbm_len = r->cache.cbm_len;
@@ -97,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 
 	ret = kstrtoul(buf, 16, &val);
 	if (ret) {
-		rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
 		return false;
 	}
 
 	if (val == 0 || val > r->default_ctrl) {
-		rdt_last_cmd_puts("mask out of range\n");
+		rdt_last_cmd_puts("Mask out of range\n");
 		return false;
 	}
 
@@ -110,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
 
 	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
-		rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
+		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
 		return false;
 	}
 
 	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
-		rdt_last_cmd_printf("Need at least %d bits in mask\n",
+		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
 				    r->cache.min_cbm_bits);
 		return false;
 	}
@@ -125,6 +172,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 
 
 /*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret) {
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+		return false;
+	}
+
+	if (val > r->default_ctrl) {
+		rdt_last_cmd_puts("Mask out of range\n");
+		return false;
+	}
+
+	*data = val;
+	return true;
+}
+
+/*
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
@@ -135,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	u32 cbm_val;
 
 	if (d->have_new_ctrl) {
-		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
 		return -EINVAL;
 	}
 
@@ -145,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	 */
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
 	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
-		rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
+		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
 		return -EINVAL;
 	}
 
-	if (!cbm_validate(data->buf, &cbm_val, r))
+	if (!r->cbm_validate(data->buf, &cbm_val, r))
 		return -EINVAL;
 
 	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
 	    rdtgrp->mode == RDT_MODE_SHAREABLE) &&
 	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
-		rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
 		return -EINVAL;
 	}
 
@@ -164,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	 * either is exclusive.
 	 */
 	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
-		rdt_last_cmd_printf("overlaps with exclusive group\n");
+		rdt_last_cmd_puts("Overlaps with exclusive group\n");
 		return -EINVAL;
 	}
 
 	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
 		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-			rdt_last_cmd_printf("overlaps with other group\n");
+			rdt_last_cmd_puts("Overlaps with other group\n");
 			return -EINVAL;
 		}
 	}
@@ -293,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 		if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
 			return parse_line(tok, r, rdtgrp);
 	}
-	rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
+	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
 	return -EINVAL;
 }
 
@@ -326,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	 */
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("resource group is pseudo-locked\n");
+		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
 		goto out;
 	}
 
@@ -467,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 
 	r = &rdt_resources_all[resid];
 	d = rdt_find_domain(r, domid, NULL);
-	if (!d) {
+	if (IS_ERR_OR_NULL(d)) {
 		ret = -ENOENT;
 		goto out;
 	}
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 3736f6dc9545..822b7db634ee 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -1,20 +1,24 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
+#ifndef _ASM_X86_RESCTRL_INTERNAL_H
+#define _ASM_X86_RESCTRL_INTERNAL_H
 
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/jump_label.h>
 
-#define IA32_L3_QOS_CFG		0xc81
-#define IA32_L2_QOS_CFG		0xc82
-#define IA32_L3_CBM_BASE	0xc90
-#define IA32_L2_CBM_BASE	0xd10
-#define IA32_MBA_THRTL_BASE	0xd50
+#define MSR_IA32_L3_QOS_CFG		0xc81
+#define MSR_IA32_L2_QOS_CFG		0xc82
+#define MSR_IA32_L3_CBM_BASE		0xc90
+#define MSR_IA32_L2_CBM_BASE		0xd10
+#define MSR_IA32_MBA_THRTL_BASE		0xd50
+#define MSR_IA32_MBA_BW_BASE		0xc0000200
 
-#define L3_QOS_CDP_ENABLE	0x01ULL
+#define MSR_IA32_QM_CTR			0x0c8e
+#define MSR_IA32_QM_EVTSEL		0x0c8d
 
-#define L2_QOS_CDP_ENABLE	0x01ULL
+#define L3_QOS_CDP_ENABLE		0x01ULL
+
+#define L2_QOS_CDP_ENABLE		0x01ULL
 
 /*
  * Event IDs are used to program IA32_QM_EVTSEL before reading event
@@ -29,6 +33,9 @@
 #define MBM_CNTR_WIDTH			24
 #define MBM_OVERFLOW_INTERVAL		1000
 #define MAX_MBA_BW			100u
+#define MBA_IS_LINEAR			0x4
+#define MBA_MAX_MBPS			U32_MAX
+#define MAX_MBA_BW_AMD			0x800
 
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)
@@ -69,7 +76,7 @@ struct rmid_read {
 	u64			val;
 };
 
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
@@ -391,9 +398,9 @@ struct rdt_parse_data {
  * struct rdt_resource - attributes of an RDT resource
  * @rid:			The index of the resource
  * @alloc_enabled:		Is allocation enabled on this machine
- * @mon_enabled:			Is monitoring enabled for this feature
+ * @mon_enabled:		Is monitoring enabled for this feature
  * @alloc_capable:		Is allocation available on this machine
- * @mon_capable:			Is monitor feature available on this machine
+ * @mon_capable:		Is monitor feature available on this machine
  * @name:			Name to use in "schemata" file
  * @num_closid:			Number of CLOSIDs available
  * @cache_level:		Which cache level defines scope of this resource
@@ -405,10 +412,11 @@ struct rdt_parse_data {
  * @cache:			Cache allocation related data
  * @format_str:			Per resource format string to show domain value
  * @parse_ctrlval:		Per resource function pointer to parse control values
- * @evt_list:			List of monitoring events
- * @num_rmid:			Number of RMIDs available
- * @mon_scale:			cqm counter * mon_scale = occupancy in bytes
- * @fflags:			flags to choose base and info files
+ * @cbm_validate		Cache bitmask validate function
+ * @evt_list:			List of monitoring events
+ * @num_rmid:			Number of RMIDs available
+ * @mon_scale:			cqm counter * mon_scale = occupancy in bytes
+ * @fflags:			flags to choose base and info files
  */
 struct rdt_resource {
 	int			rid;
@@ -431,6 +439,7 @@ struct rdt_resource {
 	int (*parse_ctrlval)(struct rdt_parse_data *data,
 			     struct rdt_resource *r,
 			     struct rdt_domain *d);
+	bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
 	struct list_head	evt_list;
 	int			num_rmid;
 	unsigned int		mon_scale;
@@ -439,8 +448,10 @@ struct rdt_resource {
 
 int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	      struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
-	     struct rdt_domain *d);
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+		   struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -463,6 +474,10 @@ enum {
 	RDT_NUM_RESOURCES,
 };
 
+#define for_each_rdt_resource(r)					      \
+	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+	     r++)
+
 #define for_each_capable_rdt_resource(r)				      \
 	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
 	     r++)							      \
@@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
 
-#endif /* _ASM_X86_INTEL_RDT_H */
+#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
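[Editor's note] The new for_each_rdt_resource() macro iterates every entry of rdt_resources_all[] regardless of capability, which is what the vendor init code in core.c needs before capabilities are probed. A stand-alone mock of the pattern; struct rdt_resource is reduced to the two fields the demo needs, so this is illustrative only:

        #include <stdio.h>

        struct rdt_resource {
                int rid;
                const char *name;
        };

        #define RDT_NUM_RESOURCES 3

        static struct rdt_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
                { 0, "L3" }, { 1, "L2" }, { 2, "MB" },
        };

        #define for_each_rdt_resource(r)                                      \
                for (r = rdt_resources_all;                                   \
                     r < rdt_resources_all + RDT_NUM_RESOURCES; r++)

        int main(void)
        {
                struct rdt_resource *r;

                /* Walks every entry, capable or not. */
                for_each_rdt_resource(r)
                        printf("rid=%d name=%s\n", r->rid, r->name);
                return 0;
        }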
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index b0f3aed76b75..f33f11f69078 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -26,10 +26,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
-#include "intel_rdt.h"
-
-#define MSR_IA32_QM_CTR		0x0c8e
-#define MSR_IA32_QM_EVTSEL	0x0c8d
+#include "internal.h"
 
 struct rmid_entry {
 	u32				rmid;
@@ -73,7 +70,7 @@ unsigned int rdt_mon_features;
  * This is the threshold cache occupancy at which we will consider an
  * RMID available for re-allocation.
  */
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
 
 static inline struct rmid_entry *__rmid_entry(u32 rmid)
 {
@@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
 {
 	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 
-	return val >= intel_cqm_threshold;
+	return val >= resctrl_cqm_threshold;
 }
 
 /*
@@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	list_for_each_entry(d, &r->domains, list) {
 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
 			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= intel_cqm_threshold)
+			if (val <= resctrl_cqm_threshold)
 				continue;
 		}
 
@@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
+	unsigned int cl_size = boot_cpu_data.x86_cache_size;
 	int ret;
 
 	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 	 *
 	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
-	intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
 	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	intel_cqm_threshold /= r->mon_scale;
+	resctrl_cqm_threshold /= r->mon_scale;
 
 	ret = dom_data_init(r);
 	if (ret)
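[Editor's note] The limbo-threshold arithmetic above is easy to check by hand. A stand-alone version using the example numbers from the comment (35MB LLC, 56 RMIDs) and an assumed occupancy scale of 64 bytes per counter unit; the scale value is illustrative, as the real one comes from CPUID via x86_cache_occ_scale:

        #include <stdio.h>

        int main(void)
        {
                unsigned int cl_size = 35 * 1024;       /* x86_cache_size is in KB */
                unsigned int num_rmid = 56;
                unsigned int mon_scale = 64;            /* assumed occupancy scale */
                unsigned int threshold = cl_size * 1024 / num_rmid;     /* bytes */

                printf("threshold = %u bytes (~%.1f%% of the LLC)\n", threshold,
                       threshold * 100.0 / (cl_size * 1024.0));
                printf("programmed value = %u counter units\n",
                       threshold / mon_scale);
                return 0;
        }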
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 815b4e92522c..14bed6af8377 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c | |||
| @@ -24,14 +24,14 @@ | |||
| 24 | 24 | ||
| 25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
| 26 | #include <asm/intel-family.h> | 26 | #include <asm/intel-family.h> |
| 27 | #include <asm/intel_rdt_sched.h> | 27 | #include <asm/resctrl_sched.h> |
| 28 | #include <asm/perf_event.h> | 28 | #include <asm/perf_event.h> |
| 29 | 29 | ||
| 30 | #include "../../events/perf_event.h" /* For X86_CONFIG() */ | 30 | #include "../../events/perf_event.h" /* For X86_CONFIG() */ |
| 31 | #include "intel_rdt.h" | 31 | #include "internal.h" |
| 32 | 32 | ||
| 33 | #define CREATE_TRACE_POINTS | 33 | #define CREATE_TRACE_POINTS |
| 34 | #include "intel_rdt_pseudo_lock_event.h" | 34 | #include "pseudo_lock_event.h" |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| 37 | * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware | 37 | * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware |
| @@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) | |||
| 213 | for_each_cpu(cpu, &plr->d->cpu_mask) { | 213 | for_each_cpu(cpu, &plr->d->cpu_mask) { |
| 214 | pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); | 214 | pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); |
| 215 | if (!pm_req) { | 215 | if (!pm_req) { |
| 216 | rdt_last_cmd_puts("fail allocating mem for PM QoS\n"); | 216 | rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); |
| 217 | ret = -ENOMEM; | 217 | ret = -ENOMEM; |
| 218 | goto out_err; | 218 | goto out_err; |
| 219 | } | 219 | } |
| @@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) | |||
| 222 | DEV_PM_QOS_RESUME_LATENCY, | 222 | DEV_PM_QOS_RESUME_LATENCY, |
| 223 | 30); | 223 | 30); |
| 224 | if (ret < 0) { | 224 | if (ret < 0) { |
| 225 | rdt_last_cmd_printf("fail to add latency req cpu%d\n", | 225 | rdt_last_cmd_printf("Failed to add latency req CPU%d\n", |
| 226 | cpu); | 226 | cpu); |
| 227 | kfree(pm_req); | 227 | kfree(pm_req); |
| 228 | ret = -1; | 228 | ret = -1; |
| @@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) | |||
| 289 | plr->cpu = cpumask_first(&plr->d->cpu_mask); | 289 | plr->cpu = cpumask_first(&plr->d->cpu_mask); |
| 290 | 290 | ||
| 291 | if (!cpu_online(plr->cpu)) { | 291 | if (!cpu_online(plr->cpu)) { |
| 292 | rdt_last_cmd_printf("cpu %u associated with cache not online\n", | 292 | rdt_last_cmd_printf("CPU %u associated with cache not online\n", |
| 293 | plr->cpu); | 293 | plr->cpu); |
| 294 | ret = -ENODEV; | 294 | ret = -ENODEV; |
| 295 | goto out_region; | 295 | goto out_region; |
| @@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) | |||
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | ret = -1; | 309 | ret = -1; |
| 310 | rdt_last_cmd_puts("unable to determine cache line size\n"); | 310 | rdt_last_cmd_puts("Unable to determine cache line size\n"); |
| 311 | out_region: | 311 | out_region: |
| 312 | pseudo_lock_region_clear(plr); | 312 | pseudo_lock_region_clear(plr); |
| 313 | return ret; | 313 | return ret; |
| @@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) | |||
| 361 | * KMALLOC_MAX_SIZE. | 361 | * KMALLOC_MAX_SIZE. |
| 362 | */ | 362 | */ |
| 363 | if (plr->size > KMALLOC_MAX_SIZE) { | 363 | if (plr->size > KMALLOC_MAX_SIZE) { |
| 364 | rdt_last_cmd_puts("requested region exceeds maximum size\n"); | 364 | rdt_last_cmd_puts("Requested region exceeds maximum size\n"); |
| 365 | ret = -E2BIG; | 365 | ret = -E2BIG; |
| 366 | goto out_region; | 366 | goto out_region; |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | plr->kmem = kzalloc(plr->size, GFP_KERNEL); | 369 | plr->kmem = kzalloc(plr->size, GFP_KERNEL); |
| 370 | if (!plr->kmem) { | 370 | if (!plr->kmem) { |
| 371 | rdt_last_cmd_puts("unable to allocate memory\n"); | 371 | rdt_last_cmd_puts("Unable to allocate memory\n"); |
| 372 | ret = -ENOMEM; | 372 | ret = -ENOMEM; |
| 373 | goto out_region; | 373 | goto out_region; |
| 374 | } | 374 | } |
| @@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) | |||
| 665 | * default closid associated with it. | 665 | * default closid associated with it. |
| 666 | */ | 666 | */ |
| 667 | if (rdtgrp == &rdtgroup_default) { | 667 | if (rdtgrp == &rdtgroup_default) { |
| 668 | rdt_last_cmd_puts("cannot pseudo-lock default group\n"); | 668 | rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); |
| 669 | return -EINVAL; | 669 | return -EINVAL; |
| 670 | } | 670 | } |
| 671 | 671 | ||
| @@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) | |||
| 707 | */ | 707 | */ |
| 708 | prefetch_disable_bits = get_prefetch_disable_bits(); | 708 | prefetch_disable_bits = get_prefetch_disable_bits(); |
| 709 | if (prefetch_disable_bits == 0) { | 709 | if (prefetch_disable_bits == 0) { |
| 710 | rdt_last_cmd_puts("pseudo-locking not supported\n"); | 710 | rdt_last_cmd_puts("Pseudo-locking not supported\n"); |
| 711 | return -EINVAL; | 711 | return -EINVAL; |
| 712 | } | 712 | } |
| 713 | 713 | ||
| 714 | if (rdtgroup_monitor_in_progress(rdtgrp)) { | 714 | if (rdtgroup_monitor_in_progress(rdtgrp)) { |
| 715 | rdt_last_cmd_puts("monitoring in progress\n"); | 715 | rdt_last_cmd_puts("Monitoring in progress\n"); |
| 716 | return -EINVAL; | 716 | return -EINVAL; |
| 717 | } | 717 | } |
| 718 | 718 | ||
| 719 | if (rdtgroup_tasks_assigned(rdtgrp)) { | 719 | if (rdtgroup_tasks_assigned(rdtgrp)) { |
| 720 | rdt_last_cmd_puts("tasks assigned to resource group\n"); | 720 | rdt_last_cmd_puts("Tasks assigned to resource group\n"); |
| 721 | return -EINVAL; | 721 | return -EINVAL; |
| 722 | } | 722 | } |
| 723 | 723 | ||
| @@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) | |||
| 727 | } | 727 | } |
| 728 | 728 | ||
| 729 | if (rdtgroup_locksetup_user_restrict(rdtgrp)) { | 729 | if (rdtgroup_locksetup_user_restrict(rdtgrp)) { |
| 730 | rdt_last_cmd_puts("unable to modify resctrl permissions\n"); | 730 | rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); |
| 731 | return -EIO; | 731 | return -EIO; |
| 732 | } | 732 | } |
| 733 | 733 | ||
| 734 | ret = pseudo_lock_init(rdtgrp); | 734 | ret = pseudo_lock_init(rdtgrp); |
| 735 | if (ret) { | 735 | if (ret) { |
| 736 | rdt_last_cmd_puts("unable to init pseudo-lock region\n"); | 736 | rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); |
| 737 | goto out_release; | 737 | goto out_release; |
| 738 | } | 738 | } |
| 739 | 739 | ||
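Taken together, the hunks above form the precondition ladder for entering pseudo-lock setup; only the message capitalization changes, not the checks. Condensed into one place as a sketch, using the helpers named in this file:

	/* A group may enter pseudo-lock setup only if all of these hold. */
	if (rdtgrp == &rdtgroup_default)
		return -EINVAL;	/* the default group cannot be pseudo-locked */
	if (!get_prefetch_disable_bits())
		return -EINVAL;	/* CPU model has no prefetch-disable bits */
	if (rdtgroup_monitor_in_progress(rdtgrp))
		return -EINVAL;	/* monitoring is active on the group */
	if (rdtgroup_tasks_assigned(rdtgrp))
		return -EINVAL;	/* the group must be empty of tasks */
	if (rdtgroup_locksetup_user_restrict(rdtgrp))
		return -EIO;	/* resctrl file permissions not restricted */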
| @@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) | |||
| 770 | if (rdt_mon_capable) { | 770 | if (rdt_mon_capable) { |
| 771 | ret = alloc_rmid(); | 771 | ret = alloc_rmid(); |
| 772 | if (ret < 0) { | 772 | if (ret < 0) { |
| 773 | rdt_last_cmd_puts("out of RMIDs\n"); | 773 | rdt_last_cmd_puts("Out of RMIDs\n"); |
| 774 | return ret; | 774 | return ret; |
| 775 | } | 775 | } |
| 776 | rdtgrp->mon.rmid = ret; | 776 | rdtgrp->mon.rmid = ret; |
| @@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) | |||
| 1304 | "pseudo_lock/%u", plr->cpu); | 1304 | "pseudo_lock/%u", plr->cpu); |
| 1305 | if (IS_ERR(thread)) { | 1305 | if (IS_ERR(thread)) { |
| 1306 | ret = PTR_ERR(thread); | 1306 | ret = PTR_ERR(thread); |
| 1307 | rdt_last_cmd_printf("locking thread returned error %d\n", ret); | 1307 | rdt_last_cmd_printf("Locking thread returned error %d\n", ret); |
| 1308 | goto out_cstates; | 1308 | goto out_cstates; |
| 1309 | } | 1309 | } |
| 1310 | 1310 | ||
| @@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) | |||
| 1322 | * the cleared, but not freed, plr struct resulting in an | 1322 | * the cleared, but not freed, plr struct resulting in an |
| 1323 | * empty pseudo-locking loop. | 1323 | * empty pseudo-locking loop. |
| 1324 | */ | 1324 | */ |
| 1325 | rdt_last_cmd_puts("locking thread interrupted\n"); | 1325 | rdt_last_cmd_puts("Locking thread interrupted\n"); |
| 1326 | goto out_cstates; | 1326 | goto out_cstates; |
| 1327 | } | 1327 | } |
| 1328 | 1328 | ||
| 1329 | ret = pseudo_lock_minor_get(&new_minor); | 1329 | ret = pseudo_lock_minor_get(&new_minor); |
| 1330 | if (ret < 0) { | 1330 | if (ret < 0) { |
| 1331 | rdt_last_cmd_puts("unable to obtain a new minor number\n"); | 1331 | rdt_last_cmd_puts("Unable to obtain a new minor number\n"); |
| 1332 | goto out_cstates; | 1332 | goto out_cstates; |
| 1333 | } | 1333 | } |
| 1334 | 1334 | ||
| @@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) | |||
| 1360 | 1360 | ||
| 1361 | if (IS_ERR(dev)) { | 1361 | if (IS_ERR(dev)) { |
| 1362 | ret = PTR_ERR(dev); | 1362 | ret = PTR_ERR(dev); |
| 1363 | rdt_last_cmd_printf("failed to create character device: %d\n", | 1363 | rdt_last_cmd_printf("Failed to create character device: %d\n", |
| 1364 | ret); | 1364 | ret); |
| 1365 | goto out_debugfs; | 1365 | goto out_debugfs; |
| 1366 | } | 1366 | } |
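The create path in these hunks delegates cache population to a kernel thread pinned to a CPU that shares the target cache instance, then sleeps until the thread signals completion before handing out a character-device minor. A sketch of the underlying pattern, assuming the thread function and completion fields this file uses (pseudo_lock_fn(), plr->lock_thread_wq, plr->thread_done):

	thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
					cpu_to_node(plr->cpu),
					"pseudo_lock/%u", plr->cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);
	kthread_bind(thread, plr->cpu);	/* must run on the cache's own CPU */
	wake_up_process(thread);

	/* the thread sets plr->thread_done and wakes this queue when done */
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);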
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h index 2c041e6d9f05..428ebbd4270b 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h | |||
| @@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3, | |||
| 39 | 39 | ||
| 40 | #undef TRACE_INCLUDE_PATH | 40 | #undef TRACE_INCLUDE_PATH |
| 41 | #define TRACE_INCLUDE_PATH . | 41 | #define TRACE_INCLUDE_PATH . |
| 42 | #define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event | 42 | #define TRACE_INCLUDE_FILE pseudo_lock_event |
| 43 | #include <trace/define_trace.h> | 43 | #include <trace/define_trace.h> |
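TRACE_INCLUDE_FILE must track the file rename because this header is included twice: once for the declarations and once more, via CREATE_TRACE_POINTS in pseudo_lock.c, where <trace/define_trace.h> rebuilds the include path from TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE. For context, the event this trailer serves looks roughly like the sketch below (the real header also defines l2 and memory-latency variants):

	TRACE_EVENT(pseudo_lock_l3,
		    TP_PROTO(u64 l3_hits, u64 l3_miss),
		    TP_ARGS(l3_hits, l3_miss),
		    TP_STRUCT__entry(__field(u64, l3_hits)
				     __field(u64, l3_miss)),
		    TP_fast_assign(__entry->l3_hits = l3_hits;
				   __entry->l3_miss = l3_miss;),
		    TP_printk("hits=%llu miss=%llu",
			      __entry->l3_hits, __entry->l3_miss));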
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f27b8115ffa2..8388adf241b2 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c | |||
| @@ -35,8 +35,8 @@ | |||
| 35 | 35 | ||
| 36 | #include <uapi/linux/magic.h> | 36 | #include <uapi/linux/magic.h> |
| 37 | 37 | ||
| 38 | #include <asm/intel_rdt_sched.h> | 38 | #include <asm/resctrl_sched.h> |
| 39 | #include "intel_rdt.h" | 39 | #include "internal.h" |
| 40 | 40 | ||
| 41 | DEFINE_STATIC_KEY_FALSE(rdt_enable_key); | 41 | DEFINE_STATIC_KEY_FALSE(rdt_enable_key); |
| 42 | DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); | 42 | DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); |
| @@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, | |||
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | /* | 300 | /* |
| 301 | * This is safe against intel_rdt_sched_in() called from __switch_to() | 301 | * This is safe against resctrl_sched_in() called from __switch_to() |
| 302 | * because __switch_to() is executed with interrupts disabled. A local call | 302 | * because __switch_to() is executed with interrupts disabled. A local call |
| 303 | from update_closid_rmid() is protected against __switch_to() because | 303 | from update_closid_rmid() is protected against __switch_to() because |
| 304 | * preemption is disabled. | 304 | * preemption is disabled. |
| @@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info) | |||
| 317 | * executing task might have its own closid selected. Just reuse | 317 | * executing task might have its own closid selected. Just reuse |
| 318 | * the context switch code. | 318 | * the context switch code. |
| 319 | */ | 319 | */ |
| 320 | intel_rdt_sched_in(); | 320 | resctrl_sched_in(); |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | /* | 323 | /* |
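resctrl_sched_in(), the renamed hook, ultimately writes the per-CPU PQR_ASSOC MSR, which carries the active monitoring RMID in its low word and the allocation CLOSID in its high word. A simplified sketch of what the helper does, with the caching of previously written values elided:

	static inline void resctrl_sched_in_sketch(void)
	{
		u32 rmid = 0, closid = 0;

		if (static_branch_likely(&rdt_enable_key)) {
			rmid   = current->rmid;
			closid = current->closid;
		}
		/* MSR_IA32_PQR_ASSOC: RMID in bits 9:0, CLOSID in bits 63:32 */
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}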
| @@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, | |||
| 345 | /* Check whether cpus belong to parent ctrl group */ | 345 | /* Check whether cpus belong to parent ctrl group */ |
| 346 | cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); | 346 | cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); |
| 347 | if (cpumask_weight(tmpmask)) { | 347 | if (cpumask_weight(tmpmask)) { |
| 348 | rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n"); | 348 | rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); |
| 349 | return -EINVAL; | 349 | return -EINVAL; |
| 350 | } | 350 | } |
| 351 | 351 | ||
| @@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, | |||
| 470 | rdt_last_cmd_clear(); | 470 | rdt_last_cmd_clear(); |
| 471 | if (!rdtgrp) { | 471 | if (!rdtgrp) { |
| 472 | ret = -ENOENT; | 472 | ret = -ENOENT; |
| 473 | rdt_last_cmd_puts("directory was removed\n"); | 473 | rdt_last_cmd_puts("Directory was removed\n"); |
| 474 | goto unlock; | 474 | goto unlock; |
| 475 | } | 475 | } |
| 476 | 476 | ||
| 477 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || | 477 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || |
| 478 | rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { | 478 | rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { |
| 479 | ret = -EINVAL; | 479 | ret = -EINVAL; |
| 480 | rdt_last_cmd_puts("pseudo-locking in progress\n"); | 480 | rdt_last_cmd_puts("Pseudo-locking in progress\n"); |
| 481 | goto unlock; | 481 | goto unlock; |
| 482 | } | 482 | } |
| 483 | 483 | ||
| @@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, | |||
| 487 | ret = cpumask_parse(buf, newmask); | 487 | ret = cpumask_parse(buf, newmask); |
| 488 | 488 | ||
| 489 | if (ret) { | 489 | if (ret) { |
| 490 | rdt_last_cmd_puts("bad cpu list/mask\n"); | 490 | rdt_last_cmd_puts("Bad CPU list/mask\n"); |
| 491 | goto unlock; | 491 | goto unlock; |
| 492 | } | 492 | } |
| 493 | 493 | ||
| @@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, | |||
| 495 | cpumask_andnot(tmpmask, newmask, cpu_online_mask); | 495 | cpumask_andnot(tmpmask, newmask, cpu_online_mask); |
| 496 | if (cpumask_weight(tmpmask)) { | 496 | if (cpumask_weight(tmpmask)) { |
| 497 | ret = -EINVAL; | 497 | ret = -EINVAL; |
| 498 | rdt_last_cmd_puts("can only assign online cpus\n"); | 498 | rdt_last_cmd_puts("Can only assign online CPUs\n"); |
| 499 | goto unlock; | 499 | goto unlock; |
| 500 | } | 500 | } |
| 501 | 501 | ||
| @@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head) | |||
| 542 | 542 | ||
| 543 | preempt_disable(); | 543 | preempt_disable(); |
| 544 | /* update PQR_ASSOC MSR to make resource group go into effect */ | 544 | /* update PQR_ASSOC MSR to make resource group go into effect */ |
| 545 | intel_rdt_sched_in(); | 545 | resctrl_sched_in(); |
| 546 | preempt_enable(); | 546 | preempt_enable(); |
| 547 | 547 | ||
| 548 | kfree(callback); | 548 | kfree(callback); |
| @@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk, | |||
| 574 | */ | 574 | */ |
| 575 | atomic_dec(&rdtgrp->waitcount); | 575 | atomic_dec(&rdtgrp->waitcount); |
| 576 | kfree(callback); | 576 | kfree(callback); |
| 577 | rdt_last_cmd_puts("task exited\n"); | 577 | rdt_last_cmd_puts("Task exited\n"); |
| 578 | } else { | 578 | } else { |
| 579 | /* | 579 | /* |
| 580 | * For ctrl_mon groups move both closid and rmid. | 580 | * For ctrl_mon groups move both closid and rmid. |
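"Task exited" is the failure leg of a task_work handoff: __rdtgroup_move_task() queues move_myself() to run in the target task's own context, so the task reprograms PQR_ASSOC for itself on its own CPU. A sketch of the queueing side, assuming the wrapper struct used here (a callback_head plus an rdtgroup pointer) and the task_work_add() signature of this kernel generation:

	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	atomic_inc(&rdtgrp->waitcount);	/* pin the group until the callback runs */
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/* task already exited: drop the pin and the callback */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
	}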
| @@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, | |||
| 692 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || | 692 | if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || |
| 693 | rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { | 693 | rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { |
| 694 | ret = -EINVAL; | 694 | ret = -EINVAL; |
| 695 | rdt_last_cmd_puts("pseudo-locking in progress\n"); | 695 | rdt_last_cmd_puts("Pseudo-locking in progress\n"); |
| 696 | goto unlock; | 696 | goto unlock; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| @@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of, | |||
| 926 | { | 926 | { |
| 927 | struct rdt_resource *r = of->kn->parent->priv; | 927 | struct rdt_resource *r = of->kn->parent->priv; |
| 928 | 928 | ||
| 929 | seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale); | 929 | seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); |
| 930 | 930 | ||
| 931 | return 0; | 931 | return 0; |
| 932 | } | 932 | } |
| @@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, | |||
| 945 | if (bytes > (boot_cpu_data.x86_cache_size * 1024)) | 945 | if (bytes > (boot_cpu_data.x86_cache_size * 1024)) |
| 946 | return -EINVAL; | 946 | return -EINVAL; |
| 947 | 947 | ||
| 948 | intel_cqm_threshold = bytes / r->mon_scale; | 948 | resctrl_cqm_threshold = bytes / r->mon_scale; |
| 949 | 949 | ||
| 950 | return nbytes; | 950 | return nbytes; |
| 951 | } | 951 | } |
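The rename from intel_cqm_threshold to resctrl_cqm_threshold keeps the units: the variable stores occupancy in hardware counter units while user space reads and writes bytes, with mon_scale (the upscaling factor the CPU reports via CPUID) converting between the two. A worked sketch, assuming a hypothetical mon_scale of 65536:

	unsigned int mon_scale = 65536;		/* assumed; reported by CPUID */
	unsigned int bytes = 4 * 1024 * 1024;	/* user writes 4 MiB */

	resctrl_cqm_threshold = bytes / mon_scale;	/* stored as 64 units */
	/* read back as resctrl_cqm_threshold * mon_scale = 4194304 bytes */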
| @@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, | |||
| 1029 | * peer RDT CDP resource. Hence the WARN. | 1029 | * peer RDT CDP resource. Hence the WARN. |
| 1030 | */ | 1030 | */ |
| 1031 | _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); | 1031 | _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); |
| 1032 | if (WARN_ON(!_d_cdp)) { | 1032 | if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { |
| 1033 | _r_cdp = NULL; | 1033 | _r_cdp = NULL; |
| 1034 | ret = -EINVAL; | 1034 | ret = -EINVAL; |
| 1035 | } | 1035 | } |
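This hunk carries the one behavioural fix in the file: rdt_find_domain() can return an error pointer as well as NULL (a negative domain id yields ERR_PTR(-ENODEV)), and WARN_ON(!_d_cdp) would let an ERR_PTR value pass and be dereferenced later. The corrected idiom, sketched:

	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d))	/* catches NULL and ERR_PTR() alike */
		return -EINVAL;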
| @@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) | |||
| 1158 | list_for_each_entry(d, &r->domains, list) { | 1158 | list_for_each_entry(d, &r->domains, list) { |
| 1159 | if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], | 1159 | if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], |
| 1160 | rdtgrp->closid, false)) { | 1160 | rdtgrp->closid, false)) { |
| 1161 | rdt_last_cmd_puts("schemata overlaps\n"); | 1161 | rdt_last_cmd_puts("Schemata overlaps\n"); |
| 1162 | return false; | 1162 | return false; |
| 1163 | } | 1163 | } |
| 1164 | } | 1164 | } |
| 1165 | } | 1165 | } |
| 1166 | 1166 | ||
| 1167 | if (!has_cache) { | 1167 | if (!has_cache) { |
| 1168 | rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); | 1168 | rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); |
| 1169 | return false; | 1169 | return false; |
| 1170 | } | 1170 | } |
| 1171 | 1171 | ||
| @@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, | |||
| 1206 | goto out; | 1206 | goto out; |
| 1207 | 1207 | ||
| 1208 | if (mode == RDT_MODE_PSEUDO_LOCKED) { | 1208 | if (mode == RDT_MODE_PSEUDO_LOCKED) { |
| 1209 | rdt_last_cmd_printf("cannot change pseudo-locked group\n"); | 1209 | rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); |
| 1210 | ret = -EINVAL; | 1210 | ret = -EINVAL; |
| 1211 | goto out; | 1211 | goto out; |
| 1212 | } | 1212 | } |
| @@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, | |||
| 1235 | goto out; | 1235 | goto out; |
| 1236 | rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; | 1236 | rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; |
| 1237 | } else { | 1237 | } else { |
| 1238 | rdt_last_cmd_printf("unknown/unsupported mode\n"); | 1238 | rdt_last_cmd_puts("Unknown or unsupported mode\n"); |
| 1239 | ret = -EINVAL; | 1239 | ret = -EINVAL; |
| 1240 | } | 1240 | } |
| 1241 | 1241 | ||
| @@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg) | |||
| 1722 | { | 1722 | { |
| 1723 | bool *enable = arg; | 1723 | bool *enable = arg; |
| 1724 | 1724 | ||
| 1725 | wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); | 1725 | wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); |
| 1726 | } | 1726 | } |
| 1727 | 1727 | ||
| 1728 | static void l2_qos_cfg_update(void *arg) | 1728 | static void l2_qos_cfg_update(void *arg) |
| 1729 | { | 1729 | { |
| 1730 | bool *enable = arg; | 1730 | bool *enable = arg; |
| 1731 | 1731 | ||
| 1732 | wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); | 1732 | wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); |
| 1733 | } | 1733 | } |
| 1734 | 1734 | ||
| 1735 | static inline bool is_mba_linear(void) | 1735 | static inline bool is_mba_linear(void) |
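Only the constant names change here: the L3 and L2 QoS configuration MSRs move into the shared msr-index namespace as MSR_IA32_L3_QOS_CFG and MSR_IA32_L2_QOS_CFG. Since these registers must be programmed on the CPUs they govern, the update helpers are written to run on the target CPUs; a sketch of the fan-out (the real code walks the resource's domain CPU masks rather than hitting every CPU):

	static void set_l3_cdp(bool enable)
	{
		/* run l3_qos_cfg_update() on each CPU and wait for completion */
		on_each_cpu(l3_qos_cfg_update, &enable, 1);
	}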
| @@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data) | |||
| 1878 | if (ret) | 1878 | if (ret) |
| 1879 | goto out; | 1879 | goto out; |
| 1880 | } else if (!strcmp(token, "mba_MBps")) { | 1880 | } else if (!strcmp(token, "mba_MBps")) { |
| 1881 | ret = set_mba_sc(true); | 1881 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
| 1882 | ret = set_mba_sc(true); | ||
| 1883 | else | ||
| 1884 | ret = -EINVAL; | ||
| 1882 | if (ret) | 1885 | if (ret) |
| 1883 | goto out; | 1886 | goto out; |
| 1884 | } else { | 1887 | } else { |
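The mba_MBps branch gains a vendor check because the MBA software controller builds on Intel-specific MBA and MBM semantics; on other vendors the mount option now fails cleanly with -EINVAL instead of enabling a controller that cannot work. The surrounding parser follows the usual strsep() mount-option pattern, sketched:

	static int parse_opts_sketch(char *data)
	{
		char *token;

		while ((token = strsep(&data, ",")) != NULL) {
			if (!*token)
				return -EINVAL;
			if (!strcmp(token, "mba_MBps")) {
				if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
					return -EINVAL;
				/* else enable the software controller */
			}
		}
		return 0;
	}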
| @@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
| 2540 | tmp_cbm = d->new_ctrl; | 2543 | tmp_cbm = d->new_ctrl; |
| 2541 | if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < | 2544 | if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < |
| 2542 | r->cache.min_cbm_bits) { | 2545 | r->cache.min_cbm_bits) { |
| 2543 | rdt_last_cmd_printf("no space on %s:%d\n", | 2546 | rdt_last_cmd_printf("No space on %s:%d\n", |
| 2544 | r->name, d->id); | 2547 | r->name, d->id); |
| 2545 | return -ENOSPC; | 2548 | return -ENOSPC; |
| 2546 | } | 2549 | } |
| @@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
| 2557 | continue; | 2560 | continue; |
| 2558 | ret = update_domains(r, rdtgrp->closid); | 2561 | ret = update_domains(r, rdtgrp->closid); |
| 2559 | if (ret < 0) { | 2562 | if (ret < 0) { |
| 2560 | rdt_last_cmd_puts("failed to initialize allocations\n"); | 2563 | rdt_last_cmd_puts("Failed to initialize allocations\n"); |
| 2561 | return ret; | 2564 | return ret; |
| 2562 | } | 2565 | } |
| 2563 | rdtgrp->mode = RDT_MODE_SHAREABLE; | 2566 | rdtgrp->mode = RDT_MODE_SHAREABLE; |
| @@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, | |||
| 2580 | rdt_last_cmd_clear(); | 2583 | rdt_last_cmd_clear(); |
| 2581 | if (!prdtgrp) { | 2584 | if (!prdtgrp) { |
| 2582 | ret = -ENODEV; | 2585 | ret = -ENODEV; |
| 2583 | rdt_last_cmd_puts("directory was removed\n"); | 2586 | rdt_last_cmd_puts("Directory was removed\n"); |
| 2584 | goto out_unlock; | 2587 | goto out_unlock; |
| 2585 | } | 2588 | } |
| 2586 | 2589 | ||
| @@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, | |||
| 2588 | (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || | 2591 | (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || |
| 2589 | prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { | 2592 | prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { |
| 2590 | ret = -EINVAL; | 2593 | ret = -EINVAL; |
| 2591 | rdt_last_cmd_puts("pseudo-locking in progress\n"); | 2594 | rdt_last_cmd_puts("Pseudo-locking in progress\n"); |
| 2592 | goto out_unlock; | 2595 | goto out_unlock; |
| 2593 | } | 2596 | } |
| 2594 | 2597 | ||
| @@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, | |||
| 2596 | rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); | 2599 | rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); |
| 2597 | if (!rdtgrp) { | 2600 | if (!rdtgrp) { |
| 2598 | ret = -ENOSPC; | 2601 | ret = -ENOSPC; |
| 2599 | rdt_last_cmd_puts("kernel out of memory\n"); | 2602 | rdt_last_cmd_puts("Kernel out of memory\n"); |
| 2600 | goto out_unlock; | 2603 | goto out_unlock; |
| 2601 | } | 2604 | } |
| 2602 | *r = rdtgrp; | 2605 | *r = rdtgrp; |
| @@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, | |||
| 2637 | if (rdt_mon_capable) { | 2640 | if (rdt_mon_capable) { |
| 2638 | ret = alloc_rmid(); | 2641 | ret = alloc_rmid(); |
| 2639 | if (ret < 0) { | 2642 | if (ret < 0) { |
| 2640 | rdt_last_cmd_puts("out of RMIDs\n"); | 2643 | rdt_last_cmd_puts("Out of RMIDs\n"); |
| 2641 | goto out_destroy; | 2644 | goto out_destroy; |
| 2642 | } | 2645 | } |
| 2643 | rdtgrp->mon.rmid = ret; | 2646 | rdtgrp->mon.rmid = ret; |
| @@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, | |||
| 2725 | kn = rdtgrp->kn; | 2728 | kn = rdtgrp->kn; |
| 2726 | ret = closid_alloc(); | 2729 | ret = closid_alloc(); |
| 2727 | if (ret < 0) { | 2730 | if (ret < 0) { |
| 2728 | rdt_last_cmd_puts("out of CLOSIDs\n"); | 2731 | rdt_last_cmd_puts("Out of CLOSIDs\n"); |
| 2729 | goto out_common_fail; | 2732 | goto out_common_fail; |
| 2730 | } | 2733 | } |
| 2731 | closid = ret; | 2734 | closid = ret; |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 772c219b6889..a4d74d616222 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
| @@ -17,7 +17,11 @@ struct cpuid_bit { | |||
| 17 | u32 sub_leaf; | 17 | u32 sub_leaf; |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | /* Please keep the leaf sorted by cpuid_bit.level for faster search. */ | 20 | /* |
| 21 | * Please keep the leaf sorted by cpuid_bit.level for faster search. | ||
| 22 | * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID | ||
| 23 | * levels are different and there is a separate entry for each. | ||
| 24 | */ | ||
| 21 | static const struct cpuid_bit cpuid_bits[] = { | 25 | static const struct cpuid_bit cpuid_bits[] = { |
| 22 | { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, | 26 | { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, |
| 23 | { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, | 27 | { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, |
| @@ -29,6 +33,7 @@ static const struct cpuid_bit cpuid_bits[] = { | |||
| 29 | { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, | 33 | { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, |
| 30 | { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, | 34 | { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, |
| 31 | { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, | 35 | { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, |
| 36 | { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, | ||
| 32 | { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, | 37 | { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, |
| 33 | { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, | 38 | { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, |
| 34 | { 0, 0, 0, 0, 0 } | 39 | { 0, 0, 0, 0, 0 } |
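The new table entry teaches the scattered-feature scan that AMD enumerates memory bandwidth allocation in CPUID leaf 0x80000008, EBX bit 6, while Intel reports the same X86_FEATURE_MBA flag through leaf 0x10, hence one entry per vendor. The detection this entry drives, sketched for the AMD case:

	u32 eax, ebx, ecx, edx;

	cpuid_count(0x80000008, 0, &eax, &ebx, &ecx, &edx);
	if (ebx & BIT(6))	/* AMD's MBA enumeration bit */
		set_cpu_cap(c, X86_FEATURE_MBA);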
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index d3e593eb189f..9d08f0510620 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
| @@ -56,7 +56,7 @@ | |||
| 56 | #include <asm/debugreg.h> | 56 | #include <asm/debugreg.h> |
| 57 | #include <asm/switch_to.h> | 57 | #include <asm/switch_to.h> |
| 58 | #include <asm/vm86.h> | 58 | #include <asm/vm86.h> |
| 59 | #include <asm/intel_rdt_sched.h> | 59 | #include <asm/resctrl_sched.h> |
| 60 | #include <asm/proto.h> | 60 | #include <asm/proto.h> |
| 61 | 61 | ||
| 62 | #include "process.h" | 62 | #include "process.h" |
| @@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 298 | this_cpu_write(current_task, next_p); | 298 | this_cpu_write(current_task, next_p); |
| 299 | 299 | ||
| 300 | /* Load the Intel cache allocation PQR MSR. */ | 300 | /* Load the Intel cache allocation PQR MSR. */ |
| 301 | intel_rdt_sched_in(); | 301 | resctrl_sched_in(); |
| 302 | 302 | ||
| 303 | return prev_p; | 303 | return prev_p; |
| 304 | } | 304 | } |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ddd4fa718c43..60783b318936 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
| @@ -52,7 +52,7 @@ | |||
| 52 | #include <asm/switch_to.h> | 52 | #include <asm/switch_to.h> |
| 53 | #include <asm/xen/hypervisor.h> | 53 | #include <asm/xen/hypervisor.h> |
| 54 | #include <asm/vdso.h> | 54 | #include <asm/vdso.h> |
| 55 | #include <asm/intel_rdt_sched.h> | 55 | #include <asm/resctrl_sched.h> |
| 56 | #include <asm/unistd.h> | 56 | #include <asm/unistd.h> |
| 57 | #include <asm/fsgsbase.h> | 57 | #include <asm/fsgsbase.h> |
| 58 | #ifdef CONFIG_IA32_EMULATION | 58 | #ifdef CONFIG_IA32_EMULATION |
| @@ -622,7 +622,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 622 | } | 622 | } |
| 623 | 623 | ||
| 624 | /* Load the Intel cache allocation PQR MSR. */ | 624 | /* Load the Intel cache allocation PQR MSR. */ |
| 625 | intel_rdt_sched_in(); | 625 | resctrl_sched_in(); |
| 626 | 626 | ||
| 627 | return prev_p; | 627 | return prev_p; |
| 628 | } | 628 | } |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 291a9bd5b97f..b3c51e869388 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -993,7 +993,7 @@ struct task_struct { | |||
| 993 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ | 993 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
| 994 | struct list_head cg_list; | 994 | struct list_head cg_list; |
| 995 | #endif | 995 | #endif |
| 996 | #ifdef CONFIG_INTEL_RDT | 996 | #ifdef CONFIG_RESCTRL |
| 997 | u32 closid; | 997 | u32 closid; |
| 998 | u32 rmid; | 998 | u32 rmid; |
| 999 | #endif | 999 | #endif |
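With the Kconfig rename, the per-task CLOSID/RMID pair is compiled in under CONFIG_RESCTRL for both vendors. These two fields are the entire per-task state the series threads through the scheduler: moving a task into a resource group amounts to the assignment sketched below, after which the next context switch on the task's CPU propagates the values into PQR_ASSOC (see the rdtgroup.c and process_*.c hunks above).

	/* Sketch: move @tsk into resource group @rdtgrp (ctrl_mon case). */
	tsk->closid = rdtgrp->closid;		/* selects allocation masks */
	tsk->rmid   = rdtgrp->mon.rmid;		/* selects monitoring counter */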
