about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorFenghua Yu <fenghua.yu@intel.com>2017-12-20 17:57:23 -0500
committerThomas Gleixner <tglx@linutronix.de>2018-01-18 03:33:31 -0500
commit99adde9b370de8e07ef76630c6f60dbf586cdf0e (patch)
tree0ad73484ae0f8b38425bcf213338d2214f762daf
parentdef10853930a82456ab862a3a8292a3a16c386e7 (diff)
x86/intel_rdt: Enable L2 CDP in MSR IA32_L2_QOS_CFG
Bit 0 in MSR IA32_L2_QOS_CFG (0xc82) is L2 CDP enable bit. By default, the bit is zero, i.e. L2 CAT is enabled, and L2 CDP is disabled. When the resctrl mount parameter "cdpl2" is given, the bit is set to 1 and L2 CDP is enabled. In L2 CDP mode, the L2 CAT mask MSRs are re-mapped into interleaved pairs of mask MSRs for code (referenced by an odd CLOSID) and data (referenced by an even CLOSID). Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com> Cc: "Tony Luck" <tony.luck@intel.com> Cc: "Vikas" <vikas.shivappa@intel.com> Cc: "Sai Praneeth" <sai.praneeth.prakhya@intel.com> Cc: "Reinette" <reinette.chatre@intel.com> Link: https://lkml.kernel.org/r/1513810644-78015-6-git-send-email-fenghua.yu@intel.com
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c117
2 files changed, 94 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 19ffc5a7c116..3fd7a70ee04a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -7,12 +7,15 @@
7#include <linux/jump_label.h> 7#include <linux/jump_label.h>
8 8
9#define IA32_L3_QOS_CFG 0xc81 9#define IA32_L3_QOS_CFG 0xc81
10#define IA32_L2_QOS_CFG 0xc82
10#define IA32_L3_CBM_BASE 0xc90 11#define IA32_L3_CBM_BASE 0xc90
11#define IA32_L2_CBM_BASE 0xd10 12#define IA32_L2_CBM_BASE 0xd10
12#define IA32_MBA_THRTL_BASE 0xd50 13#define IA32_MBA_THRTL_BASE 0xd50
13 14
14#define L3_QOS_CDP_ENABLE 0x01ULL 15#define L3_QOS_CDP_ENABLE 0x01ULL
15 16
17#define L2_QOS_CDP_ENABLE 0x01ULL
18
16/* 19/*
17 * Event IDs are used to program IA32_QM_EVTSEL before reading event 20 * Event IDs are used to program IA32_QM_EVTSEL before reading event
18 * counter from IA32_QM_CTR 21 * counter from IA32_QM_CTR
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 64c5ff97ee0d..bdab7d2f51af 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -990,6 +990,7 @@ out_destroy:
990 kernfs_remove(kn); 990 kernfs_remove(kn);
991 return ret; 991 return ret;
992} 992}
993
993static void l3_qos_cfg_update(void *arg) 994static void l3_qos_cfg_update(void *arg)
994{ 995{
995 bool *enable = arg; 996 bool *enable = arg;
@@ -997,8 +998,17 @@ static void l3_qos_cfg_update(void *arg)
997 wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 998 wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
998} 999}
999 1000
1000static int set_l3_qos_cfg(struct rdt_resource *r, bool enable) 1001static void l2_qos_cfg_update(void *arg)
1001{ 1002{
1003 bool *enable = arg;
1004
1005 wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
1006}
1007
1008static int set_cache_qos_cfg(int level, bool enable)
1009{
1010 void (*update)(void *arg);
1011 struct rdt_resource *r_l;
1002 cpumask_var_t cpu_mask; 1012 cpumask_var_t cpu_mask;
1003 struct rdt_domain *d; 1013 struct rdt_domain *d;
1004 int cpu; 1014 int cpu;
@@ -1006,16 +1016,24 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
1006 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 1016 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1007 return -ENOMEM; 1017 return -ENOMEM;
1008 1018
1009 list_for_each_entry(d, &r->domains, list) { 1019 if (level == RDT_RESOURCE_L3)
1020 update = l3_qos_cfg_update;
1021 else if (level == RDT_RESOURCE_L2)
1022 update = l2_qos_cfg_update;
1023 else
1024 return -EINVAL;
1025
1026 r_l = &rdt_resources_all[level];
1027 list_for_each_entry(d, &r_l->domains, list) {
1010 /* Pick one CPU from each domain instance to update MSR */ 1028 /* Pick one CPU from each domain instance to update MSR */
1011 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1029 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1012 } 1030 }
1013 cpu = get_cpu(); 1031 cpu = get_cpu();
1014 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ 1032 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
1015 if (cpumask_test_cpu(cpu, cpu_mask)) 1033 if (cpumask_test_cpu(cpu, cpu_mask))
1016 l3_qos_cfg_update(&enable); 1034 update(&enable);
1017 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */ 1035 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
1018 smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1); 1036 smp_call_function_many(cpu_mask, update, &enable, 1);
1019 put_cpu(); 1037 put_cpu();
1020 1038
1021 free_cpumask_var(cpu_mask); 1039 free_cpumask_var(cpu_mask);
@@ -1023,52 +1041,99 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
1023 return 0; 1041 return 0;
1024} 1042}
1025 1043
1026static int cdp_enable(void) 1044static int cdp_enable(int level, int data_type, int code_type)
1027{ 1045{
1028 struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA]; 1046 struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
1029 struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE]; 1047 struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
1030 struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3]; 1048 struct rdt_resource *r_l = &rdt_resources_all[level];
1031 int ret; 1049 int ret;
1032 1050
1033 if (!r_l3->alloc_capable || !r_l3data->alloc_capable || 1051 if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
1034 !r_l3code->alloc_capable) 1052 !r_lcode->alloc_capable)
1035 return -EINVAL; 1053 return -EINVAL;
1036 1054
1037 ret = set_l3_qos_cfg(r_l3, true); 1055 ret = set_cache_qos_cfg(level, true);
1038 if (!ret) { 1056 if (!ret) {
1039 r_l3->alloc_enabled = false; 1057 r_l->alloc_enabled = false;
1040 r_l3data->alloc_enabled = true; 1058 r_ldata->alloc_enabled = true;
1041 r_l3code->alloc_enabled = true; 1059 r_lcode->alloc_enabled = true;
1042 } 1060 }
1043 return ret; 1061 return ret;
1044} 1062}
1045 1063
1046static void cdp_disable(void) 1064static int cdpl3_enable(void)
1047{ 1065{
1048 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; 1066 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
1067 RDT_RESOURCE_L3CODE);
1068}
1069
1070static int cdpl2_enable(void)
1071{
1072 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
1073 RDT_RESOURCE_L2CODE);
1074}
1075
1076static void cdp_disable(int level, int data_type, int code_type)
1077{
1078 struct rdt_resource *r = &rdt_resources_all[level];
1049 1079
1050 r->alloc_enabled = r->alloc_capable; 1080 r->alloc_enabled = r->alloc_capable;
1051 1081
1052 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) { 1082 if (rdt_resources_all[data_type].alloc_enabled) {
1053 rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false; 1083 rdt_resources_all[data_type].alloc_enabled = false;
1054 rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false; 1084 rdt_resources_all[code_type].alloc_enabled = false;
1055 set_l3_qos_cfg(r, false); 1085 set_cache_qos_cfg(level, false);
1056 } 1086 }
1057} 1087}
1058 1088
1089static void cdpl3_disable(void)
1090{
1091 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
1092}
1093
1094static void cdpl2_disable(void)
1095{
1096 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
1097}
1098
1099static void cdp_disable_all(void)
1100{
1101 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
1102 cdpl3_disable();
1103 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
1104 cdpl2_disable();
1105}
1106
1059static int parse_rdtgroupfs_options(char *data) 1107static int parse_rdtgroupfs_options(char *data)
1060{ 1108{
1061 char *token, *o = data; 1109 char *token, *o = data;
1062 int ret = 0; 1110 int ret = 0;
1063 1111
1064 while ((token = strsep(&o, ",")) != NULL) { 1112 while ((token = strsep(&o, ",")) != NULL) {
1065 if (!*token) 1113 if (!*token) {
1066 return -EINVAL; 1114 ret = -EINVAL;
1115 goto out;
1116 }
1067 1117
1068 if (!strcmp(token, "cdp")) 1118 if (!strcmp(token, "cdp")) {
1069 ret = cdp_enable(); 1119 ret = cdpl3_enable();
1120 if (ret)
1121 goto out;
1122 } else if (!strcmp(token, "cdpl2")) {
1123 ret = cdpl2_enable();
1124 if (ret)
1125 goto out;
1126 } else {
1127 ret = -EINVAL;
1128 goto out;
1129 }
1070 } 1130 }
1071 1131
1132 return 0;
1133
1134out:
1135 pr_err("Invalid mount option \"%s\"\n", token);
1136
1072 return ret; 1137 return ret;
1073} 1138}
1074 1139
@@ -1223,7 +1288,7 @@ out_mongrp:
1223out_info: 1288out_info:
1224 kernfs_remove(kn_info); 1289 kernfs_remove(kn_info);
1225out_cdp: 1290out_cdp:
1226 cdp_disable(); 1291 cdp_disable_all();
1227out: 1292out:
1228 rdt_last_cmd_clear(); 1293 rdt_last_cmd_clear();
1229 mutex_unlock(&rdtgroup_mutex); 1294 mutex_unlock(&rdtgroup_mutex);
@@ -1383,7 +1448,7 @@ static void rdt_kill_sb(struct super_block *sb)
1383 /*Put everything back to default values. */ 1448 /*Put everything back to default values. */
1384 for_each_alloc_enabled_rdt_resource(r) 1449 for_each_alloc_enabled_rdt_resource(r)
1385 reset_all_ctrls(r); 1450 reset_all_ctrls(r);
1386 cdp_disable(); 1451 cdp_disable_all();
1387 rmdir_all_sub(); 1452 rmdir_all_sub();
1388 static_branch_disable_cpuslocked(&rdt_alloc_enable_key); 1453 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
1389 static_branch_disable_cpuslocked(&rdt_mon_enable_key); 1454 static_branch_disable_cpuslocked(&rdt_mon_enable_key);