author	Vikas Shivappa <vikas.shivappa@linux.intel.com>	2017-04-07 20:33:51 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2017-04-14 10:10:07 -0400
commit	2545e9f51ea860736c4dc1e90a44ed75e9c91e3b (patch)
tree	03474c2c2f42d5bf80464b59c303ee39395cf1de
parent	a9cad3d4f046bbd8f096b78d220c8d7074c2e93f (diff)
x86/intel_rdt: Cleanup namespace to support multiple resource types
Many data structures and functions are named after cache-specific resources (cbm, cache, etc.). In many cases other, non-cache resources may need to share the same data structures/functions.

Generalize such naming to prepare for adding more resources like memory bandwidth.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: vikas.shivappa@intel.com
Link: http://lkml.kernel.org/r/1491611637-20417-3-git-send-email-vikas.shivappa@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
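To illustrate the point of the rename, the following is a minimal user-space sketch (not kernel code, and not part of this patch): one resource description with a generic default_ctrl value and a per-CLOSID ctrl_val array can describe both a cache bit-mask resource and a hypothetical bandwidth-percentage resource, whereas the old max_cbm/cbm names only made sense for caches. Field names mirror the patch; everything else is a simplified assumption.

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct rdt_resource + struct rdt_domain. */
struct resource {
	const char	*name;
	int		num_closid;	/* number of CLOSIDs */
	unsigned int	default_ctrl;	/* full cbm, or 100% bandwidth */
	unsigned int	*ctrl_val;	/* per-CLOSID control values */
};

/* Reset every CLOSID of a resource to its default control value. */
static void reset_all_ctrls(struct resource *r)
{
	for (int i = 0; i < r->num_closid; i++)
		r->ctrl_val[i] = r->default_ctrl;
}

int main(void)
{
	struct resource l3 = { "L3", 4, 0xfffff, NULL };  /* 20-bit cbm */
	struct resource mb = { "MB", 4, 100, NULL };      /* 100% b/w   */
	struct resource *res[] = { &l3, &mb };

	for (int i = 0; i < 2; i++) {
		res[i]->ctrl_val = calloc(res[i]->num_closid,
					  sizeof(*res[i]->ctrl_val));
		reset_all_ctrls(res[i]);
		printf("%s: closid0 ctrl=%x\n", res[i]->name,
		       res[i]->ctrl_val[0]);
		free(res[i]->ctrl_val);
	}
	return 0;
}
```

The same reset/update helpers work for either resource because nothing in them is cache-specific, which is exactly what the generic names are meant to convey.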
-rw-r--r--	arch/x86/include/asm/intel_rdt.h	22
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt.c	28
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_rdtgroup.c	16
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_schemata.c	20
4 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 611c82306fdf..55e0459b9a03 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -79,7 +79,7 @@ struct rftype {
  * @capable: Is this feature available on this machine
  * @name: Name to use in "schemata" file
  * @num_closid: Number of CLOSIDs available
- * @max_cbm: Largest Cache Bit Mask allowed
+ * @default_ctrl: Specifies default cache cbm or mem b/w percent.
  * @data_width: Character width of data when displaying
  * @min_cbm_bits: Minimum number of consecutive bits to be set
  *                in a cache bit mask
@@ -97,7 +97,7 @@ struct rdt_resource {
         int num_closid;
         int cbm_len;
         int min_cbm_bits;
-        u32 max_cbm;
+        u32 default_ctrl;
         int data_width;
         struct list_head domains;
         int msr_base;
@@ -111,17 +111,17 @@ struct rdt_resource {
  * @list: all instances of this resource
  * @id: unique id for this instance
  * @cpu_mask: which cpus share this resource
- * @cbm: array of cache bit masks (indexed by CLOSID)
- * @new_cbm: new cbm value to be loaded
- * @have_new_cbm: did user provide new_cbm for this domain
+ * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: did user provide new_ctrl for this domain
  */
 struct rdt_domain {
         struct list_head list;
         int id;
         struct cpumask cpu_mask;
-        u32 *cbm;
-        u32 new_cbm;
-        bool have_new_cbm;
+        u32 *ctrl_val;
+        u32 new_ctrl;
+        bool have_new_ctrl;
 };
 
 /**
@@ -172,8 +172,8 @@ union cpuid_0x10_1_eax {
         unsigned int full;
 };
 
-/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
-union cpuid_0x10_1_edx {
+/* CPUID.(EAX=10H, ECX=ResID).EDX */
+union cpuid_0x10_x_edx {
         struct {
                 unsigned int cos_max:16;
         } split;
@@ -182,7 +182,7 @@ union cpuid_0x10_1_edx {
 
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
 
-void rdt_cbm_update(void *arg);
+void rdt_ctrl_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index d2e5f92b5428..92d8431fdc38 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -125,7 +125,7 @@ static inline bool cache_alloc_hsw_probe(void)
 
         r->num_closid = 4;
         r->cbm_len = 20;
-        r->max_cbm = max_cbm;
+        r->default_ctrl = max_cbm;
         r->min_cbm_bits = 2;
         r->capable = true;
         r->enabled = true;
@@ -136,16 +136,16 @@ static inline bool cache_alloc_hsw_probe(void)
         return false;
 }
 
-static void rdt_get_config(int idx, struct rdt_resource *r)
+static void rdt_get_cache_config(int idx, struct rdt_resource *r)
 {
         union cpuid_0x10_1_eax eax;
-        union cpuid_0x10_1_edx edx;
+        union cpuid_0x10_x_edx edx;
         u32 ebx, ecx;
 
         cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
         r->num_closid = edx.split.cos_max + 1;
         r->cbm_len = eax.split.cbm_len + 1;
-        r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
         r->data_width = (r->cbm_len + 3) / 4;
         r->capable = true;
         r->enabled = true;
@@ -158,7 +158,7 @@ static void rdt_get_cdp_l3_config(int type)
 
         r->num_closid = r_l3->num_closid / 2;
         r->cbm_len = r_l3->cbm_len;
-        r->max_cbm = r_l3->max_cbm;
+        r->default_ctrl = r_l3->default_ctrl;
         r->data_width = (r->cbm_len + 3) / 4;
         r->capable = true;
         /*
@@ -181,7 +181,7 @@ static int get_cache_id(int cpu, int level)
         return -1;
 }
 
-void rdt_cbm_update(void *arg)
+void rdt_ctrl_update(void *arg)
 {
         struct msr_param *m = (struct msr_param *)arg;
         struct rdt_resource *r = m->res;
@@ -202,7 +202,7 @@ found:
         for (i = m->low; i < m->high; i++) {
                 int idx = cbm_idx(r, i);
 
-                wrmsrl(r->msr_base + idx, d->cbm[i]);
+                wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
         }
 }
 
@@ -275,8 +275,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
         d->id = id;
 
-        d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
-        if (!d->cbm) {
+        d->ctrl_val = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+        if (!d->ctrl_val) {
                 kfree(d);
                 return;
         }
@@ -284,8 +284,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
         for (i = 0; i < r->num_closid; i++) {
                 int idx = cbm_idx(r, i);
 
-                d->cbm[i] = r->max_cbm;
-                wrmsrl(r->msr_base + idx, d->cbm[i]);
+                d->ctrl_val[i] = r->default_ctrl;
+                wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
         }
 
         cpumask_set_cpu(cpu, &d->cpu_mask);
@@ -305,7 +305,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
         cpumask_clear_cpu(cpu, &d->cpu_mask);
         if (cpumask_empty(&d->cpu_mask)) {
-                kfree(d->cbm);
+                kfree(d->ctrl_val);
                 list_del(&d->list);
                 kfree(d);
         }
@@ -383,7 +383,7 @@ static __init bool get_rdt_resources(void)
                 return false;
 
         if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
-                rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+                rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
                 if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
                         rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
                         rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
@@ -392,7 +392,7 @@ static __init bool get_rdt_resources(void)
         }
         if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
                 /* CPUID 0x10.2 fields are same format at 0x10.1 */
-                rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+                rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
                 ret = true;
         }
         return ret;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 6870ebfcdcb3..380ee9d8ee6f 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -519,12 +519,12 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
         return 0;
 }
 
-static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+static int rdt_default_ctrl_show(struct kernfs_open_file *of,
                              struct seq_file *seq, void *v)
 {
         struct rdt_resource *r = of->kn->parent->priv;
 
-        seq_printf(seq, "%x\n", r->max_cbm);
+        seq_printf(seq, "%x\n", r->default_ctrl);
 
         return 0;
 }
@@ -551,7 +551,7 @@ static struct rftype res_info_files[] = {
                 .name = "cbm_mask",
                 .mode = 0444,
                 .kf_ops = &rdtgroup_kf_single_ops,
-                .seq_show = rdt_cbm_mask_show,
+                .seq_show = rdt_default_ctrl_show,
         },
         {
                 .name = "min_cbm_bits",
@@ -801,7 +801,7 @@ out:
         return dentry;
 }
 
-static int reset_all_cbms(struct rdt_resource *r)
+static int reset_all_ctrls(struct rdt_resource *r)
 {
         struct msr_param msr_param;
         cpumask_var_t cpu_mask;
@@ -824,14 +824,14 @@ static int reset_all_cbms(struct rdt_resource *r)
                 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
                 for (i = 0; i < r->num_closid; i++)
-                        d->cbm[i] = r->max_cbm;
+                        d->ctrl_val[i] = r->default_ctrl;
         }
         cpu = get_cpu();
         /* Update CBM on this cpu if it's in cpu_mask. */
         if (cpumask_test_cpu(cpu, cpu_mask))
-                rdt_cbm_update(&msr_param);
+                rdt_ctrl_update(&msr_param);
         /* Update CBM on all other cpus in cpu_mask. */
-        smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+        smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
         put_cpu();
 
         free_cpumask_var(cpu_mask);
@@ -917,7 +917,7 @@ static void rdt_kill_sb(struct super_block *sb)
 
         /*Put everything back to default values. */
         for_each_enabled_rdt_resource(r)
-                reset_all_cbms(r);
+                reset_all_ctrls(r);
         cdp_disable();
         rmdir_all_sub();
         static_branch_disable(&rdt_enable_key);
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index 8594db455aa1..7695179776ba 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -38,7 +38,7 @@ static bool cbm_validate(unsigned long var, struct rdt_resource *r)
 {
         unsigned long first_bit, zero_bit;
 
-        if (var == 0 || var > r->max_cbm)
+        if (var == 0 || var > r->default_ctrl)
                 return false;
 
         first_bit = find_first_bit(&var, r->cbm_len);
@@ -61,7 +61,7 @@ static int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
         unsigned long data;
         int ret;
 
-        if (d->have_new_cbm)
+        if (d->have_new_ctrl)
                 return -EINVAL;
 
         ret = kstrtoul(buf, 16, &data);
@@ -69,8 +69,8 @@ static int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
                 return ret;
         if (!cbm_validate(data, r))
                 return -EINVAL;
-        d->new_cbm = data;
-        d->have_new_cbm = true;
+        d->new_ctrl = data;
+        d->have_new_ctrl = true;
 
         return 0;
 }
@@ -119,9 +119,9 @@ static int update_domains(struct rdt_resource *r, int closid)
         msr_param.res = r;
 
         list_for_each_entry(d, &r->domains, list) {
-                if (d->have_new_cbm && d->new_cbm != d->cbm[closid]) {
+                if (d->have_new_ctrl && d->new_ctrl != d->ctrl_val[closid]) {
                         cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-                        d->cbm[closid] = d->new_cbm;
+                        d->ctrl_val[closid] = d->new_ctrl;
                 }
         }
         if (cpumask_empty(cpu_mask))
@@ -129,9 +129,9 @@ static int update_domains(struct rdt_resource *r, int closid)
         cpu = get_cpu();
         /* Update CBM on this cpu if it's in cpu_mask. */
         if (cpumask_test_cpu(cpu, cpu_mask))
-                rdt_cbm_update(&msr_param);
+                rdt_ctrl_update(&msr_param);
         /* Update CBM on other cpus. */
-        smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+        smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
         put_cpu();
 
 done:
@@ -164,7 +164,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
         for_each_enabled_rdt_resource(r)
                 list_for_each_entry(dom, &r->domains, list)
-                        dom->have_new_cbm = false;
+                        dom->have_new_ctrl = false;
 
         while ((tok = strsep(&buf, "\n")) != NULL) {
                 resname = strsep(&tok, ":");
@@ -208,7 +208,7 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
                 if (sep)
                         seq_puts(s, ";");
                 seq_printf(s, "%d=%0*x", dom->id, max_data_width,
-                           dom->cbm[closid]);
+                           dom->ctrl_val[closid]);
                 sep = true;
         }
         seq_puts(s, "\n");