author		Sudeep Holla <sudeep.holla@arm.com>	2015-01-08 02:41:52 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-01-08 04:02:54 -0500
commit		d97d929f06d0e072cd36fba6bd9d25b29bae34fd
tree		3c48f427f271170dd71f7d4ebe9bc8a96f777b1d /arch/s390/kernel
parent		e6a67ad0e29087201536792f7d5cecec4ff6fc64
s390: move cacheinfo sysfs to generic cacheinfo infrastructure
This patch removes the redundant sysfs cacheinfo code by reusing
the generic cacheinfo infrastructure introduced by commit
246246cbde5e ("drivers: base: support cpu cache information
interface to userspace via sysfs").
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
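
The generic infrastructure relied on here works by having each architecture
override two weak hooks, init_cache_level() and populate_cache_leaves(); the
common code then allocates the cacheinfo leaves and creates the
cpuN/cache/indexM sysfs hierarchy itself, which is what lets all the kobject
plumbing below be deleted. A minimal sketch of that contract follows (a
hypothetical single-level topology; the sizes and counts are invented for
illustration and are not taken from this patch):

	#include <linux/cacheinfo.h>
	#include <linux/cpumask.h>

	/* Tell the core how many levels and leaves to allocate for this CPU. */
	int init_cache_level(unsigned int cpu)
	{
		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

		this_cpu_ci->num_levels = 1;	/* hypothetical: a single level */
		this_cpu_ci->num_leaves = 1;	/* one unified L1 */
		return 0;
	}

	/* Describe each leaf; the core builds the sysfs files from this. */
	int populate_cache_leaves(unsigned int cpu)
	{
		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;

		leaf->level = 1;
		leaf->type = CACHE_TYPE_UNIFIED;
		leaf->size = 64 * 1024;		/* made-up numbers */
		leaf->coherency_line_size = 64;
		leaf->ways_of_associativity = 8;
		leaf->number_of_sets = leaf->size /
			(leaf->coherency_line_size * leaf->ways_of_associativity);
		/* private cache: only this CPU in the shared mask */
		cpumask_set_cpu(cpu, &leaf->shared_cpu_map);
		return 0;
	}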
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--	arch/s390/kernel/cache.c	388
1 file changed, 92 insertions(+), 296 deletions(-)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..fe21f074cf9f 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/notifier.h>
 #include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/cacheinfo.h>
 #include <asm/facility.h>
 
-struct cache {
-	unsigned long size;
-	unsigned int line_size;
-	unsigned int associativity;
-	unsigned int nr_sets;
-	unsigned int level : 3;
-	unsigned int type : 2;
-	unsigned int private : 1;
-	struct list_head list;
-};
-
-struct cache_dir {
-	struct kobject *kobj;
-	struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
-	struct kobject kobj;
-	int cpu;
-	struct cache *cache;
-	struct cache_index_dir *next;
-};
-
 enum {
 	CACHE_SCOPE_NOTEXISTS,
 	CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
 };
 
 enum {
-	CACHE_TYPE_SEPARATE,
-	CACHE_TYPE_DATA,
-	CACHE_TYPE_INSTRUCTION,
-	CACHE_TYPE_UNIFIED,
+	CTYPE_SEPARATE,
+	CTYPE_DATA,
+	CTYPE_INSTRUCTION,
+	CTYPE_UNIFIED,
 };
 
 enum {
@@ -70,39 +44,59 @@ struct cache_info {
 };
 
 #define CACHE_MAX_LEVEL 8
-
 union cache_topology {
 	struct cache_info ci[CACHE_MAX_LEVEL];
 	unsigned long long raw;
 };
 
 static const char * const cache_type_string[] = {
-	"Data",
+	"",
 	"Instruction",
+	"Data",
+	"",
 	"Unified",
 };
 
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
+static const enum cache_type cache_type_map[] = {
+	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
+	[CTYPE_DATA] = CACHE_TYPE_DATA,
+	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
+	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
 
 void show_cacheinfo(struct seq_file *m)
 {
-	struct cache *cache;
-	int index = 0;
+	int cpu = smp_processor_id(), idx;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *cache;
 
-	list_for_each_entry(cache, &cache_list, list) {
-		seq_printf(m, "cache%-11d: ", index);
+	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+		cache = this_cpu_ci->info_list + idx;
+		seq_printf(m, "cache%-11d: ", idx);
 		seq_printf(m, "level=%d ", cache->level);
 		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
-		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
-		seq_printf(m, "size=%luK ", cache->size >> 10);
-		seq_printf(m, "line_size=%u ", cache->line_size);
-		seq_printf(m, "associativity=%d", cache->associativity);
+		seq_printf(m, "scope=%s ",
+			   cache->disable_sysfs ? "Shared" : "Private");
+		seq_printf(m, "size=%dK ", cache->size >> 10);
+		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
+		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
 		seq_puts(m, "\n");
-		index++;
 	}
 }
 
+static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
+{
+	if (level >= CACHE_MAX_LEVEL)
+		return CACHE_TYPE_NOCACHE;
+
+	ci += level;
+
+	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
+		return CACHE_TYPE_NOCACHE;
+
+	return cache_type_map[ci->type];
+}
+
 static inline unsigned long ecag(int ai, int li, int ti)
 {
 	unsigned long cmd, val;
@@ -113,277 +107,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
 	return val;
 }
 
-static int __init cache_add(int level, int private, int type)
+static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
+			 enum cache_type type, unsigned int level)
 {
-	struct cache *cache;
-	int ti;
+	int ti, num_sets;
+	int cpu = smp_processor_id();
 
-	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-	if (!cache)
-		return -ENOMEM;
-	if (type == CACHE_TYPE_INSTRUCTION)
+	if (type == CACHE_TYPE_INST)
 		ti = CACHE_TI_INSTRUCTION;
 	else
 		ti = CACHE_TI_UNIFIED;
-	cache->size = ecag(EXTRACT_SIZE, level, ti);
-	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
-	cache->nr_sets = cache->size / cache->associativity;
-	cache->nr_sets /= cache->line_size;
-	cache->private = private;
-	cache->level = level + 1;
-	cache->type = type - 1;
-	list_add_tail(&cache->list, &cache_list);
-	return 0;
-}
-
-static void __init cache_build_info(void)
-{
-	struct cache *cache, *next;
-	union cache_topology ct;
-	int level, private, rc;
-
-	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
-	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
-		switch (ct.ci[level].scope) {
-		case CACHE_SCOPE_SHARED:
-			private = 0;
-			break;
-		case CACHE_SCOPE_PRIVATE:
-			private = 1;
-			break;
-		default:
-			return;
-		}
-		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
-			rc = cache_add(level, private, CACHE_TYPE_DATA);
-			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
-		} else {
-			rc = cache_add(level, private, ct.ci[level].type);
-		}
-		if (rc)
-			goto error;
-	}
-	return;
-error:
-	list_for_each_entry_safe(cache, next, &cache_list, list) {
-		list_del(&cache->list);
-		kfree(cache);
-	}
-}
-
-static struct cache_dir *cache_create_cache_dir(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct kobject *kobj = NULL;
-	struct device *dev;
-
-	dev = get_cpu_device(cpu);
-	if (!dev)
-		goto out;
-	kobj = kobject_create_and_add("cache", &dev->kobj);
-	if (!kobj)
-		goto out;
-	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
-	if (!cache_dir)
-		goto out;
-	cache_dir->kobj = kobj;
-	cache_dir_cpu[cpu] = cache_dir;
-	return cache_dir;
-out:
-	kobject_put(kobj);
-	return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
-	return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
-	struct cache_index_dir *index;
-
-	index = kobj_to_cache_index_dir(kobj);
-	kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
-				struct attribute *attr, char *buf)
-{
-	struct kobj_attribute *kobj_attr;
-
-	kobj_attr = container_of(attr, struct kobj_attribute, attr);
-	return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
-static ssize_t cache_##_name##_show(struct kobject *kobj,		\
-				    struct kobj_attribute *attr,	\
-				    char *buf)				\
-{									\
-	struct cache_index_dir *index;					\
-									\
-	index = kobj_to_cache_index_dir(kobj);				\
-	return sprintf(buf, _format, _value);				\
-}									\
-static struct kobj_attribute cache_##_name##_attr =			\
-	__ATTR(_name, 0444, cache_##_name##_show, NULL);
 
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
+	this_leaf->level = level + 1;
+	this_leaf->type = type;
+	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
+						level, ti);
+	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
 
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
-	struct cache_index_dir *index;
-	int len;
-
-	index = kobj_to_cache_index_dir(kobj);
-	len = type ?
-		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
-		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
-	len += sprintf(&buf[len], "\n");
-	return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
-				   struct kobj_attribute *attr, char *buf)
-{
-	return shared_cpu_map_func(kobj, 0, buf);
+	num_sets = this_leaf->size / this_leaf->coherency_line_size;
+	num_sets /= this_leaf->ways_of_associativity;
+	this_leaf->number_of_sets = num_sets;
+	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+	if (!private)
+		this_leaf->disable_sysfs = true;
 }
-static struct kobj_attribute cache_shared_cpu_map_attr =
-	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
 
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
-				    struct kobj_attribute *attr, char *buf)
+int init_cache_level(unsigned int cpu)
 {
-	return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
-	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
-	&cache_type_attr.attr,
-	&cache_size_attr.attr,
-	&cache_number_of_sets_attr.attr,
-	&cache_ways_of_associativity_attr.attr,
-	&cache_level_attr.attr,
-	&cache_coherency_line_size_attr.attr,
-	&cache_shared_cpu_map_attr.attr,
-	&cache_shared_cpu_list_attr.attr,
-	NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
-	.show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
-	.sysfs_ops = &cache_index_ops,
-	.release = cache_index_release,
-	.default_attrs = cache_index_default_attrs,
-};
-
-static int cache_create_index_dir(struct cache_dir *cache_dir,
-				  struct cache *cache, int index, int cpu)
-{
-	struct cache_index_dir *index_dir;
-	int rc;
-
-	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
-	if (!index_dir)
-		return -ENOMEM;
-	index_dir->cache = cache;
-	index_dir->cpu = cpu;
-	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
-				  cache_dir->kobj, "index%d", index);
-	if (rc)
-		goto out;
-	index_dir->next = cache_dir->index;
-	cache_dir->index = index_dir;
-	return 0;
-out:
-	kfree(index_dir);
-	return rc;
-}
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	unsigned int level = 0, leaves = 0;
+	union cache_topology ct;
+	enum cache_type ctype;
 
-static int cache_add_cpu(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct cache *cache;
-	int rc, index = 0;
+	if (!this_cpu_ci)
+		return -EINVAL;
 
-	if (list_empty(&cache_list))
-		return 0;
-	cache_dir = cache_create_cache_dir(cpu);
-	if (!cache_dir)
-		return -ENOMEM;
-	list_for_each_entry(cache, &cache_list, list) {
-		if (!cache->private)
+	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+	do {
+		ctype = get_cache_type(&ct.ci[0], level);
+		if (ctype == CACHE_TYPE_NOCACHE)
 			break;
-		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
-		if (rc)
-			return rc;
-		index++;
-	}
-	return 0;
-}
+		/* Separate instruction and data caches */
+		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+	} while (++level < CACHE_MAX_LEVEL);
 
-static void cache_remove_cpu(int cpu)
-{
-	struct cache_index_dir *index, *next;
-	struct cache_dir *cache_dir;
+	this_cpu_ci->num_levels = level;
+	this_cpu_ci->num_leaves = leaves;
 
-	cache_dir = cache_dir_cpu[cpu];
-	if (!cache_dir)
-		return;
-	index = cache_dir->index;
-	while (index) {
-		next = index->next;
-		kobject_put(&index->kobj);
-		index = next;
-	}
-	kobject_put(cache_dir->kobj);
-	kfree(cache_dir);
-	cache_dir_cpu[cpu] = NULL;
+	return 0;
 }
 
-static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
-			 void *hcpu)
+int populate_cache_leaves(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	int rc = 0;
+	unsigned int level, idx, pvt;
+	union cache_topology ct;
+	enum cache_type ctype;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		rc = cache_add_cpu(cpu);
-		if (rc)
-			cache_remove_cpu(cpu);
-		break;
-	case CPU_DEAD:
-		cache_remove_cpu(cpu);
-		break;
+	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
+	     idx < this_cpu_ci->num_leaves; idx++, level++) {
+		if (!this_leaf)
+			return -EINVAL;
+
+		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
+		ctype = get_cache_type(&ct.ci[0], level);
+		if (ctype == CACHE_TYPE_SEPARATE) {
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+		} else {
+			ci_leaf_init(this_leaf++, pvt, ctype, level);
+		}
 	}
-	return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
-	int cpu;
-
-	if (!test_facility(34))
-		return 0;
-	cache_build_info();
-
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu)
-		cache_add_cpu(cpu);
-	__hotcpu_notifier(cache_hotplug, 0);
-	cpu_notifier_register_done();
 	return 0;
 }
-device_initcall(cache_init);