author      Heiko Carstens <heiko.carstens@de.ibm.com>        2012-08-23 10:31:13 -0400
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>       2012-09-26 09:44:51 -0400
commit      881730ad365130f64b5c70c40904b04eb3b79de3 (patch)
tree        53fe5c94f2497475967a516cb9c148bcf6d21adb
parent      648609e3f24599a5ad8d53df2ec13fbc37810bcc (diff)
s390/cache: expose cpu cache topology via sysfs
Expose cpu cache topology via sysfs. The created sysfs directory structure is compatible with what x86, ia64 and powerpc have.

On s390 we expose only information about cpu caches which are private to a cpu via sysfs. Caches which are shared between cpus do not have a sysfs representation. The reason is that the file "shared_cpu_map" is mandatory, and only when running under LPAR is it possible to tell which cpus share which cache. Second level hypervisors, however, do not and cannot expose that information to guests. In order to have a consistent view, we chose to only ever expose information about private cpu caches via sysfs.

Example for a z196 cpu (cpu1 in /sys/devices/cpu):

cpu1/cache/index0/size -- 64K
cpu1/cache/index0/type -- Data
cpu1/cache/index0/level -- 1
cpu1/cache/index0/number_of_sets -- 64
cpu1/cache/index0/shared_cpu_map -- 00000000,00000002
cpu1/cache/index0/shared_cpu_list -- 1
cpu1/cache/index0/coherency_line_size -- 256
cpu1/cache/index0/ways_of_associativity -- 4

cpu1/cache/index1/size -- 128K
cpu1/cache/index1/type -- Instruction
cpu1/cache/index1/level -- 1
cpu1/cache/index1/number_of_sets -- 64
cpu1/cache/index1/shared_cpu_map -- 00000000,00000002
cpu1/cache/index1/shared_cpu_list -- 1
cpu1/cache/index1/coherency_line_size -- 256
cpu1/cache/index1/ways_of_associativity -- 8

cpu1/cache/index2/size -- 1536K
cpu1/cache/index2/type -- Unified
cpu1/cache/index2/level -- 2
cpu1/cache/index2/number_of_sets -- 512
cpu1/cache/index2/shared_cpu_map -- 00000000,00000002
cpu1/cache/index2/shared_cpu_list -- 1
cpu1/cache/index2/coherency_line_size -- 256
cpu1/cache/index2/ways_of_associativity -- 12

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
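As a minimal userspace sketch (not part of the patch), the following program reads the attributes the patch registers for one cache index and prints them in the same "name -- value" form as the example above. The attribute names are the ones created by the patch; the cpu and index numbers are hard-coded for brevity, and the path assumes the conventional /sys/devices/system/cpu location of cpu devices, so adjust both as needed.

/* Illustrative sketch only: dump the sysfs attributes of one cache index.
 * Assumes the usual /sys/devices/system/cpu layout; cpu1/index0 is an
 * arbitrary choice matching the commit message's example. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        static const char * const attrs[] = {
                "size", "type", "level", "number_of_sets",
                "shared_cpu_map", "shared_cpu_list",
                "coherency_line_size", "ways_of_associativity",
        };
        char path[128], value[64];
        unsigned int i;

        for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/cpu1/cache/index0/%s",
                         attrs[i]);
                FILE *f = fopen(path, "r");
                if (!f)
                        continue; /* attribute absent on this system */
                if (fgets(value, sizeof(value), f)) {
                        value[strcspn(value, "\n")] = '\0';
                        printf("cpu1/cache/index0/%s -- %s\n",
                               attrs[i], value);
                }
                fclose(f);
        }
        return 0;
}

Note that the example values above are internally consistent: ways_of_associativity * coherency_line_size * number_of_sets reproduces the size for every index (for index0: 4 * 256 * 64 = 64K), which is exactly the relation cache_add() below inverts when it computes nr_sets.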
-rw-r--r--    arch/s390/kernel/Makefile    1
-rw-r--r--    arch/s390/kernel/cache.c     359
2 files changed, 360 insertions, 0 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9733b3f0eb6d..86b8247134c1 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)   += ftrace.o
 obj-$(CONFIG_CRASH_DUMP)        += crash_dump.o
 obj-$(CONFIG_PERF_EVENTS)       += perf_event.o perf_cpum_cf.o
+obj-$(CONFIG_64BIT)             += cache.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
new file mode 100644
index 000000000000..5e20bab4df22
--- /dev/null
+++ b/arch/s390/kernel/cache.c
@@ -0,0 +1,359 @@
+/*
+ * Extract CPU cache information and expose them via sysfs.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+
+struct cache {
+        unsigned long size;
+        unsigned int line_size;
+        unsigned int associativity;
+        unsigned int nr_sets;
+        int level;
+        int type;
+        struct list_head list;
+};
+
+struct cache_dir {
+        struct kobject *kobj;
+        struct cache_index_dir *index;
+};
+
+struct cache_index_dir {
+        struct kobject kobj;
+        int cpu;
+        struct cache *cache;
+        struct cache_index_dir *next;
+};
+
+enum {
+        CACHE_SCOPE_NOTEXISTS,
+        CACHE_SCOPE_PRIVATE,
+        CACHE_SCOPE_SHARED,
+        CACHE_SCOPE_RESERVED,
+};
+
+enum {
+        CACHE_TYPE_SEPARATE,
+        CACHE_TYPE_DATA,
+        CACHE_TYPE_INSTRUCTION,
+        CACHE_TYPE_UNIFIED,
+};
+
+enum {
+        EXTRACT_TOPOLOGY,
+        EXTRACT_LINE_SIZE,
+        EXTRACT_SIZE,
+        EXTRACT_ASSOCIATIVITY,
+};
+
+enum {
+        CACHE_TI_UNIFIED = 0,
+        CACHE_TI_INSTRUCTION = 0,
+        CACHE_TI_DATA,
+};
+
+struct cache_info {
+        unsigned char       : 4;
+        unsigned char scope : 2;
+        unsigned char type  : 2;
+};
+
+#define CACHE_MAX_LEVEL 8
+
+union cache_topology {
+        struct cache_info ci[CACHE_MAX_LEVEL];
+        unsigned long long raw;
+};
+
+static const char * const cache_type_string[] = {
+        "Data",
+        "Instruction",
+        "Unified",
+};
+
+static struct cache_dir *cache_dir_cpu[NR_CPUS];
+static LIST_HEAD(cache_list);
+
+static inline unsigned long ecag(int ai, int li, int ti)
+{
+        unsigned long cmd, val;
+
+        cmd = ai << 4 | li << 1 | ti;
+        asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
+                     : "=d" (val) : "a" (cmd));
+        return val;
+}
+
+static int __init cache_add(int level, int type)
+{
+        struct cache *cache;
+        int ti;
+
+        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+        if (!cache)
+                return -ENOMEM;
+        ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
+        cache->size = ecag(EXTRACT_SIZE, level, ti);
+        cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+        cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
+        cache->nr_sets = cache->size / cache->associativity;
+        cache->nr_sets /= cache->line_size;
+        cache->level = level + 1;
+        cache->type = type;
+        list_add_tail(&cache->list, &cache_list);
+        return 0;
+}
+
+static void __init cache_build_info(void)
+{
+        struct cache *cache, *next;
+        union cache_topology ct;
+        int level, rc;
+
+        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+        for (level = 0; level < CACHE_MAX_LEVEL; level++) {
+                switch (ct.ci[level].scope) {
+                case CACHE_SCOPE_NOTEXISTS:
+                case CACHE_SCOPE_RESERVED:
+                case CACHE_SCOPE_SHARED:
+                        return;
+                case CACHE_SCOPE_PRIVATE:
+                        break;
+                }
+                if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
+                        rc = cache_add(level, CACHE_TYPE_DATA);
+                        rc |= cache_add(level, CACHE_TYPE_INSTRUCTION);
+                } else {
+                        rc = cache_add(level, ct.ci[level].type);
+                }
+                if (rc)
+                        goto error;
+        }
+        return;
+error:
+        list_for_each_entry_safe(cache, next, &cache_list, list) {
+                list_del(&cache->list);
+                kfree(cache);
+        }
+}
+
+static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
+{
+        struct cache_dir *cache_dir;
+        struct kobject *kobj = NULL;
+        struct device *dev;
+
+        dev = get_cpu_device(cpu);
+        if (!dev)
+                goto out;
+        kobj = kobject_create_and_add("cache", &dev->kobj);
+        if (!kobj)
+                goto out;
+        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
+        if (!cache_dir)
+                goto out;
+        cache_dir->kobj = kobj;
+        cache_dir_cpu[cpu] = cache_dir;
+        return cache_dir;
+out:
+        kobject_put(kobj);
+        return NULL;
+}
+
+static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
+{
+        return container_of(kobj, struct cache_index_dir, kobj);
+}
+
+static void cache_index_release(struct kobject *kobj)
+{
+        struct cache_index_dir *index;
+
+        index = kobj_to_cache_index_dir(kobj);
+        kfree(index);
+}
+
+static ssize_t cache_index_show(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+        struct kobj_attribute *kobj_attr;
+
+        kobj_attr = container_of(attr, struct kobj_attribute, attr);
+        return kobj_attr->show(kobj, kobj_attr, buf);
+}
+
+#define DEFINE_CACHE_ATTR(_name, _format, _value)                      \
+static ssize_t cache_##_name##_show(struct kobject *kobj,              \
+                                    struct kobj_attribute *attr,       \
+                                    char *buf)                         \
+{                                                                      \
+        struct cache_index_dir *index;                                 \
+                                                                       \
+        index = kobj_to_cache_index_dir(kobj);                         \
+        return sprintf(buf, _format, _value);                          \
+}                                                                      \
+static struct kobj_attribute cache_##_name##_attr =                    \
+        __ATTR(_name, 0444, cache_##_name##_show, NULL);
+
+DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
+DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
+DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
+DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
+DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type - 1]);
+DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
+
+static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
+{
+        struct cache_index_dir *index;
+        int len;
+
+        index = kobj_to_cache_index_dir(kobj);
+        len = type ?
+                cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
+                cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
+        len += sprintf(&buf[len], "\n");
+        return len;
+}
+
+static ssize_t shared_cpu_map_show(struct kobject *kobj,
+                                   struct kobj_attribute *attr, char *buf)
+{
+        return shared_cpu_map_func(kobj, 0, buf);
+}
+static struct kobj_attribute cache_shared_cpu_map_attr =
+        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+
+static ssize_t shared_cpu_list_show(struct kobject *kobj,
+                                    struct kobj_attribute *attr, char *buf)
+{
+        return shared_cpu_map_func(kobj, 1, buf);
+}
+static struct kobj_attribute cache_shared_cpu_list_attr =
+        __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
+
+static struct attribute *cache_index_default_attrs[] = {
+        &cache_type_attr.attr,
+        &cache_size_attr.attr,
+        &cache_number_of_sets_attr.attr,
+        &cache_ways_of_associativity_attr.attr,
+        &cache_level_attr.attr,
+        &cache_coherency_line_size_attr.attr,
+        &cache_shared_cpu_map_attr.attr,
+        &cache_shared_cpu_list_attr.attr,
+        NULL,
+};
+
+static const struct sysfs_ops cache_index_ops = {
+        .show = cache_index_show,
+};
+
+static struct kobj_type cache_index_type = {
+        .sysfs_ops = &cache_index_ops,
+        .release = cache_index_release,
+        .default_attrs = cache_index_default_attrs,
+};
+
+static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
+                                            struct cache *cache, int index,
+                                            int cpu)
+{
+        struct cache_index_dir *index_dir;
+        int rc;
+
+        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
+        if (!index_dir)
+                return -ENOMEM;
+        index_dir->cache = cache;
+        index_dir->cpu = cpu;
+        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
+                                  cache_dir->kobj, "index%d", index);
+        if (rc)
+                goto out;
+        index_dir->next = cache_dir->index;
+        cache_dir->index = index_dir;
+        return 0;
+out:
+        kfree(index_dir);
+        return rc;
+}
+
+static int __cpuinit cache_add_cpu(int cpu)
+{
+        struct cache_dir *cache_dir;
+        struct cache *cache;
+        int rc, index = 0;
+
+        if (list_empty(&cache_list))
+                return 0;
+        cache_dir = cache_create_cache_dir(cpu);
+        if (!cache_dir)
+                return -ENOMEM;
+        list_for_each_entry(cache, &cache_list, list) {
+                rc = cache_create_index_dir(cache_dir, cache, index, cpu);
+                if (rc)
+                        return rc;
+                index++;
+        }
+        return 0;
+}
+
+static void __cpuinit cache_remove_cpu(int cpu)
+{
+        struct cache_index_dir *index, *next;
+        struct cache_dir *cache_dir;
+
+        cache_dir = cache_dir_cpu[cpu];
+        if (!cache_dir)
+                return;
+        index = cache_dir->index;
+        while (index) {
+                next = index->next;
+                kobject_put(&index->kobj);
+                index = next;
+        }
+        kobject_put(cache_dir->kobj);
+        kfree(cache_dir);
+        cache_dir_cpu[cpu] = NULL;
+}
+
+static int __cpuinit cache_hotplug(struct notifier_block *nfb,
+                                   unsigned long action, void *hcpu)
+{
+        int cpu = (long)hcpu;
+        int rc = 0;
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_ONLINE:
+                rc = cache_add_cpu(cpu);
+                if (rc)
+                        cache_remove_cpu(cpu);
+                break;
+        case CPU_DEAD:
+                cache_remove_cpu(cpu);
+                break;
+        }
+        return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static int __init cache_init(void)
+{
+        int cpu;
+
+        if (!test_facility(34))
+                return 0;
+        cache_build_info();
+        for_each_online_cpu(cpu)
+                cache_add_cpu(cpu);
+        hotcpu_notifier(cache_hotplug, 0);
+        return 0;
+}
+device_initcall(cache_init);
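For reference on what the new file relies on: ecag() issues the ECAG ("extract CPU attribute") instruction, packing the attribute indication, level index, and type indication into the command operand as ai << 4 | li << 1 | ti. For EXTRACT_TOPOLOGY the result holds one descriptor per cache level in union cache_topology: given the big-endian bit-field layout of struct cache_info on s390, each byte carries the scope in bits 4-5 and the type in bits 6-7, with level 1 in the most significant byte. The standalone sketch below, which assumes only that layout and uses a made-up sample word, decodes such a value the same way cache_build_info() does, stopping at the first level that is not private.

/* Minimal decode sketch, assuming the struct cache_info layout above:
 * one byte per level, bits 4-5 = scope, bits 6-7 = type (big-endian
 * bit numbering), level 1 in the most significant byte. */
#include <stdio.h>

static const char * const scope_str[] = {
        "nonexistent", "private", "shared", "reserved"
};
static const char * const type_str[] = {
        "separate (split I/D)", "data", "instruction", "unified"
};

static void decode_topology(unsigned long long raw)
{
        int level;

        for (level = 0; level < 8; level++) {   /* CACHE_MAX_LEVEL */
                unsigned int ci = (raw >> (56 - 8 * level)) & 0xff;
                unsigned int scope = (ci >> 2) & 3;
                unsigned int type = ci & 3;

                /* Like cache_build_info(): stop at the first level
                 * that is not private to the cpu. */
                if (scope != 1)
                        break;
                printf("L%d: %s %s cache\n", level + 1,
                       scope_str[scope], type_str[type]);
        }
}

int main(void)
{
        /* Made-up topology word: L1 private split I/D, L2 private
         * unified, L3 shared -- so only L1 and L2 are printed. */
        decode_topology(0x04070b0000000000ULL);
        return 0;
}

Stopping at the first non-private level is also why the z196 example in the commit message ends at index2: shared higher-level caches deliberately get no sysfs representation.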