diff options
author | Vikas Shivappa <vikas.shivappa@linux.intel.com> | 2018-04-20 18:36:17 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-05-19 07:16:43 -0400 |
commit | 19c635ab24a1e94a759e82bfb34554a6a0db215e (patch) | |
tree | 8309c7eb590909e3b6028167906f3b80c56da3b0 | |
parent | d6c64a4f49fdea0ae79addc3282ae8eb8581bdfc (diff) |
x86/intel_rdt/mba_sc: Enable/disable MBA software controller
Currently the user does memory bandwidth allocation (MBA) by specifying the
bandwidth in percentage via the resctrl schemata file:
"/sys/fs/resctrl/schemata"
Add a new mount option "mba_MBps" to enable the user to specify MBA
in MBps:
$mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl
Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/1524263781-14267-3-git-send-email-vikas.shivappa@linux.intel.com
-rw-r--r-- | arch/x86/kernel/cpu/intel_rdt.c | 8 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/intel_rdt.h | 3 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 30 |
3 files changed, 41 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 589b948e6e01..53ee6838c496 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c | |||
@@ -230,6 +230,14 @@ static inline void cache_alloc_hsw_probe(void) | |||
230 | rdt_alloc_capable = true; | 230 | rdt_alloc_capable = true; |
231 | } | 231 | } |
232 | 232 | ||
233 | bool is_mba_sc(struct rdt_resource *r) | ||
234 | { | ||
235 | if (!r) | ||
236 | return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc; | ||
237 | |||
238 | return r->membw.mba_sc; | ||
239 | } | ||
240 | |||
233 | /* | 241 | /* |
234 | * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values | 242 | * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values |
235 | * exposed to user interface and the h/w understandable delay values. | 243 | * exposed to user interface and the h/w understandable delay values. |
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 3fd7a70ee04a..74aee0fdc97c 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h | |||
@@ -259,6 +259,7 @@ struct rdt_cache { | |||
259 | * @min_bw: Minimum memory bandwidth percentage user can request | 259 | * @min_bw: Minimum memory bandwidth percentage user can request |
260 | * @bw_gran: Granularity at which the memory bandwidth is allocated | 260 | * @bw_gran: Granularity at which the memory bandwidth is allocated |
261 | * @delay_linear: True if memory B/W delay is in linear scale | 261 | * @delay_linear: True if memory B/W delay is in linear scale |
262 | * @mba_sc: True if MBA software controller(mba_sc) is enabled | ||
262 | * @mb_map: Mapping of memory B/W percentage to memory B/W delay | 263 | * @mb_map: Mapping of memory B/W percentage to memory B/W delay |
263 | */ | 264 | */ |
264 | struct rdt_membw { | 265 | struct rdt_membw { |
@@ -266,6 +267,7 @@ struct rdt_membw { | |||
266 | u32 min_bw; | 267 | u32 min_bw; |
267 | u32 bw_gran; | 268 | u32 bw_gran; |
268 | u32 delay_linear; | 269 | u32 delay_linear; |
270 | bool mba_sc; | ||
269 | u32 *mb_map; | 271 | u32 *mb_map; |
270 | }; | 272 | }; |
271 | 273 | ||
@@ -445,6 +447,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_domain *d, | |||
445 | void mbm_setup_overflow_handler(struct rdt_domain *dom, | 447 | void mbm_setup_overflow_handler(struct rdt_domain *dom, |
446 | unsigned long delay_ms); | 448 | unsigned long delay_ms); |
447 | void mbm_handle_overflow(struct work_struct *work); | 449 | void mbm_handle_overflow(struct work_struct *work); |
450 | bool is_mba_sc(struct rdt_resource *r); | ||
448 | void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); | 451 | void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); |
449 | void cqm_handle_limbo(struct work_struct *work); | 452 | void cqm_handle_limbo(struct work_struct *work); |
450 | bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); | 453 | bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index fca759d272a1..440025446239 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
@@ -1005,6 +1005,11 @@ static void l2_qos_cfg_update(void *arg) | |||
1005 | wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); | 1005 | wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | static inline bool is_mba_linear(void) | ||
1009 | { | ||
1010 | return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; | ||
1011 | } | ||
1012 | |||
1008 | static int set_cache_qos_cfg(int level, bool enable) | 1013 | static int set_cache_qos_cfg(int level, bool enable) |
1009 | { | 1014 | { |
1010 | void (*update)(void *arg); | 1015 | void (*update)(void *arg); |
@@ -1041,6 +1046,25 @@ static int set_cache_qos_cfg(int level, bool enable) | |||
1041 | return 0; | 1046 | return 0; |
1042 | } | 1047 | } |
1043 | 1048 | ||
1049 | /* | ||
1050 | * Enable or disable the MBA software controller | ||
1051 | * which helps user specify bandwidth in MBps. | ||
1052 | * MBA software controller is supported only if | ||
1053 | * MBM is supported and MBA is in linear scale. | ||
1054 | */ | ||
1055 | static int set_mba_sc(bool mba_sc) | ||
1056 | { | ||
1057 | struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; | ||
1058 | |||
1059 | if (!is_mbm_enabled() || !is_mba_linear() || | ||
1060 | mba_sc == is_mba_sc(r)) | ||
1061 | return -EINVAL; | ||
1062 | |||
1063 | r->membw.mba_sc = mba_sc; | ||
1064 | |||
1065 | return 0; | ||
1066 | } | ||
1067 | |||
1044 | static int cdp_enable(int level, int data_type, int code_type) | 1068 | static int cdp_enable(int level, int data_type, int code_type) |
1045 | { | 1069 | { |
1046 | struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; | 1070 | struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; |
@@ -1123,6 +1147,10 @@ static int parse_rdtgroupfs_options(char *data) | |||
1123 | ret = cdpl2_enable(); | 1147 | ret = cdpl2_enable(); |
1124 | if (ret) | 1148 | if (ret) |
1125 | goto out; | 1149 | goto out; |
1150 | } else if (!strcmp(token, "mba_MBps")) { | ||
1151 | ret = set_mba_sc(true); | ||
1152 | if (ret) | ||
1153 | goto out; | ||
1126 | } else { | 1154 | } else { |
1127 | ret = -EINVAL; | 1155 | ret = -EINVAL; |
1128 | goto out; | 1156 | goto out; |
@@ -1445,6 +1473,8 @@ static void rdt_kill_sb(struct super_block *sb) | |||
1445 | cpus_read_lock(); | 1473 | cpus_read_lock(); |
1446 | mutex_lock(&rdtgroup_mutex); | 1474 | mutex_lock(&rdtgroup_mutex); |
1447 | 1475 | ||
1476 | set_mba_sc(false); | ||
1477 | |||
1448 | /*Put everything back to default values. */ | 1478 | /*Put everything back to default values. */ |
1449 | for_each_alloc_enabled_rdt_resource(r) | 1479 | for_each_alloc_enabled_rdt_resource(r) |
1450 | reset_all_ctrls(r); | 1480 | reset_all_ctrls(r); |