 arch/x86/include/asm/intel_rdt.h         |   6 +
 arch/x86/kernel/cpu/Makefile             |   2 +-
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |   7 +
 arch/x86/kernel/cpu/intel_rdt_schemata.c | 245 +++++++++++++++++++++++++++++++
 4 files changed, 259 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index a0dd3e99038d..2e5eab09083e 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -73,6 +73,7 @@ struct rftype {
  * @num_domains:	Number of domains active
  * @msr_base:		Base MSR address for CBMs
  * @tmp_cbms:		Scratch space when updating schemata
+ * @num_tmp_cbms:	Number of CBMs in tmp_cbms
  * @cache_level:	Which cache level defines scope of this domain
  * @cbm_idx_multi:	Multiplier of CBM index
  * @cbm_idx_offset:	Offset of CBM index. CBM index is computed by:
@@ -90,6 +91,7 @@ struct rdt_resource {
 	int			num_domains;
 	int			msr_base;
 	u32			*tmp_cbms;
+	int			num_tmp_cbms;
 	int			cache_level;
 	int			cbm_idx_multi;
 	int			cbm_idx_offset;
@@ -170,4 +172,8 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
 void rdt_cbm_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+				char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+			   struct seq_file *s, void *v);
 #endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b4334e86c1a9..c9f8c818d104 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_INTEL_RDT_A)	+= intel_rdt.o intel_rdt_rdtgroup.o
+obj-$(CONFIG_INTEL_RDT_A)	+= intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
 
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 5cc0865f2908..5c4bab9452b0 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -440,6 +440,13 @@ static struct rftype rdtgroup_base_files[] = {
 		.write		= rdtgroup_tasks_write,
 		.seq_show	= rdtgroup_tasks_show,
 	},
+	{
+		.name		= "schemata",
+		.mode		= 0644,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.write		= rdtgroup_schemata_write,
+		.seq_show	= rdtgroup_schemata_show,
+	},
 };
 
 static int rdt_num_closids_show(struct kernfs_open_file *of,
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
new file mode 100644
index 000000000000..f369cb8db0d5
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -0,0 +1,245 @@
+/*
+ * Resource Director Technology (RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu <fenghua.yu@intel.com>
+ *    Tony Luck <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * Check whether a cache bit mask is valid. The SDM says:
+ *	Please note that all (and only) contiguous '1' combinations
+ *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
+ * Additionally Haswell requires at least two bits set.
+ */
+static bool cbm_validate(unsigned long var, struct rdt_resource *r)
+{
+	unsigned long first_bit, zero_bit;
+
+	if (var == 0 || var > r->max_cbm)
+		return false;
+
+	first_bit = find_first_bit(&var, r->cbm_len);
+	zero_bit = find_next_zero_bit(&var, r->cbm_len, first_bit);
+
+	if (find_next_bit(&var, r->cbm_len, zero_bit) < r->cbm_len)
+		return false;
+
+	if ((zero_bit - first_bit) < r->min_cbm_bits)
+		return false;
+	return true;
+}
+
+/*
+ * Read one cache bit mask (hex). Check that it is valid for the current
+ * resource type.
+ */
+static int parse_cbm(char *buf, struct rdt_resource *r)
+{
+	unsigned long data;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &data);
+	if (ret)
+		return ret;
+	if (!cbm_validate(data, r))
+		return -EINVAL;
+	r->tmp_cbms[r->num_tmp_cbms++] = data;
+
+	return 0;
+}
+
+/*
+ * For each domain in this resource we expect to find a series of:
+ *	id=mask
+ * separated by ";". The "id" is in decimal, and must appear in the
+ * right order.
+ */
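+/* e.g. with two domains the masks might be given as "0=fffff;1=f0" */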
+static int parse_line(char *line, struct rdt_resource *r)
+{
+	char *dom = NULL, *id;
+	struct rdt_domain *d;
+	unsigned long dom_id;
+
+	list_for_each_entry(d, &r->domains, list) {
+		dom = strsep(&line, ";");
+		if (!dom)
+			return -EINVAL;
+		id = strsep(&dom, "=");
+		if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
+			return -EINVAL;
+		if (parse_cbm(dom, r))
+			return -EINVAL;
+	}
+
+	/* Any garbage at the end of the line? */
+	if (line && line[0])
+		return -EINVAL;
+	return 0;
+}
+
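+/*
+ * Apply the CBMs staged in tmp_cbms for @closid: pick one CPU from each
+ * domain and have it write the updated value to the domain's CBM MSR.
+ */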
+static int update_domains(struct rdt_resource *r, int closid)
+{
+	struct msr_param msr_param;
+	cpumask_var_t cpu_mask;
+	struct rdt_domain *d;
+	int cpu, idx = 0;
+
+	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	msr_param.low = closid;
+	msr_param.high = msr_param.low + 1;
+	msr_param.res = r;
+
+	list_for_each_entry(d, &r->domains, list) {
+		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+		d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+	}
+	cpu = get_cpu();
+	/* Update CBM on this cpu if it's in cpu_mask. */
+	if (cpumask_test_cpu(cpu, cpu_mask))
+		rdt_cbm_update(&msr_param);
+	/* Update CBM on other cpus. */
+	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+	put_cpu();
+
+	free_cpumask_var(cpu_mask);
+
+	return 0;
+}
+
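+/*
+ * A write consists of one line per resource, each of the form:
+ *	<resource name>:<domain id>=<cbm>;<domain id>=<cbm>;...
+ * e.g. "L3:0=fffff;1=f0". Every domain of every enabled resource must be
+ * given a mask, otherwise the whole write is rejected with -EINVAL.
+ */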
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+				char *buf, size_t nbytes, loff_t off)
+{
+	struct rdtgroup *rdtgrp;
+	struct rdt_resource *r;
+	char *tok, *resname;
+	int closid, ret = 0;
+	u32 *l3_cbms = NULL;
+
+	/* Valid input requires a trailing newline */
+	if (nbytes == 0 || buf[nbytes - 1] != '\n')
+		return -EINVAL;
+	buf[nbytes - 1] = '\0';
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (!rdtgrp) {
+		rdtgroup_kn_unlock(of->kn);
+		return -ENOENT;
+	}
+
+	closid = rdtgrp->closid;
+
+	/* get scratch space to save all the masks while we validate input */
+	for_each_enabled_rdt_resource(r) {
+		r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+				      GFP_KERNEL);
+		if (!r->tmp_cbms) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		r->num_tmp_cbms = 0;
+	}
+
+	while ((tok = strsep(&buf, "\n")) != NULL) {
+		resname = strsep(&tok, ":");
+		if (!tok) {
+			ret = -EINVAL;
+			goto out;
+		}
+		for_each_enabled_rdt_resource(r) {
+			if (!strcmp(resname, r->name) &&
+			    closid < r->num_closid) {
+				ret = parse_line(tok, r);
+				if (ret)
+					goto out;
+				break;
+			}
+		}
+		if (!r->name) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Did the parser find all the masks we need? */
+	for_each_enabled_rdt_resource(r) {
+		if (r->num_tmp_cbms != r->num_domains) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	for_each_enabled_rdt_resource(r) {
+		ret = update_domains(r, closid);
+		if (ret)
+			goto out;
+	}
+
+out:
+	rdtgroup_kn_unlock(of->kn);
+	for_each_enabled_rdt_resource(r) {
+		kfree(r->tmp_cbms);
+		r->tmp_cbms = NULL;
+	}
+	return ret ?: nbytes;
+}
+
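+/* Print one resource's schemata line in the same "<name>:<id>=<cbm>;..." format */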
+static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+{
+	struct rdt_domain *dom;
+	bool sep = false;
+
+	seq_printf(s, "%s:", r->name);
+	list_for_each_entry(dom, &r->domains, list) {
+		if (sep)
+			seq_puts(s, ";");
+		seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+		sep = true;
+	}
+	seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+			   struct seq_file *s, void *v)
+{
+	struct rdtgroup *rdtgrp;
+	struct rdt_resource *r;
+	int closid, ret = 0;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (rdtgrp) {
+		closid = rdtgrp->closid;
+		for_each_enabled_rdt_resource(r) {
+			if (closid < r->num_closid)
+				show_doms(s, r, closid);
+		}
+	} else {
+		ret = -ENOENT;
+	}
+	rdtgroup_kn_unlock(of->kn);
+	return ret;
+}