Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile     |   2 +-
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c  | 837 +++++++++++++++++++++++++++
-rw-r--r--  arch/powerpc/kernel/cacheinfo.h  |   8 +
-rw-r--r--  arch/powerpc/kernel/pci-common.c |  71 ++-
-rw-r--r--  arch/powerpc/kernel/pci_64.c     |   9 +-
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c  |   1 +
-rw-r--r--  arch/powerpc/kernel/prom.c       |  14 +-
-rw-r--r--  arch/powerpc/kernel/prom_init.c  |   2 +-
-rw-r--r--  arch/powerpc/kernel/sysfs.c      | 300 +---------
9 files changed, 936 insertions(+), 308 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1308a86e9070..8d1a419df35d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,7 +29,7 @@ endif
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o
+				   signal.o sysfs.o cacheinfo.o
 obj-y				+= vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b33f0417a4bf
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -0,0 +1,837 @@
+/*
+ * Processor cache information made available to userspace via sysfs;
+ * intended to be compatible with x86 intel_cacheinfo implementation.
+ *
+ * Copyright 2008 IBM Corporation
+ * Author: Nathan Lynch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/percpu.h>
+#include <asm/prom.h>
+
+#include "cacheinfo.h"
+
+/* per-cpu object for tracking:
+ * - a "cache" kobject for the top-level directory
+ * - a list of "index" objects representing the cpu's local cache hierarchy
+ */
+struct cache_dir {
+	struct kobject *kobj; /* bare (not embedded) kobject for cache
+			       * directory */
+	struct cache_index_dir *index; /* list of index objects */
+};
+
+/* "index" object: each cpu's cache directory has an index
+ * subdirectory corresponding to a cache object associated with the
+ * cpu.  This object's lifetime is managed via the embedded kobject.
+ */
+struct cache_index_dir {
+	struct kobject kobj;
+	struct cache_index_dir *next; /* next index in parent directory */
+	struct cache *cache;
+};
+
+/* Template for determining which OF properties to query for a given
+ * cache type */
+struct cache_type_info {
+	const char *name;
+	const char *size_prop;
+
+	/* Allow for both [di]-cache-line-size and
+	 * [di]-cache-block-size properties.  According to the PowerPC
+	 * Processor binding, -line-size should be provided if it
+	 * differs from the cache block size (that which is operated
+	 * on by cache instructions), so we look for -line-size first.
+	 * See cache_get_line_size(). */
+
+	const char *line_size_props[2];
+	const char *nr_sets_prop;
+};
+
+/* These are used to index the cache_type_info array. */
+#define CACHE_TYPE_UNIFIED     0
+#define CACHE_TYPE_INSTRUCTION 1
+#define CACHE_TYPE_DATA        2
+
+static const struct cache_type_info cache_type_info[] = {
+	{
+		/* PowerPC Processor binding says the [di]-cache-*
+		 * must be equal on unified caches, so just use
+		 * d-cache properties. */
+		.name            = "Unified",
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+	{
+		.name            = "Instruction",
+		.size_prop       = "i-cache-size",
+		.line_size_props = { "i-cache-line-size",
+				     "i-cache-block-size", },
+		.nr_sets_prop    = "i-cache-sets",
+	},
+	{
+		.name            = "Data",
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+};
+
+/* Cache object: each instance of this corresponds to a distinct cache
+ * in the system.  There are separate objects for Harvard caches: one
+ * each for instruction and data, and each refers to the same OF node.
+ * The refcount of the OF node is elevated for the lifetime of the
+ * cache object.  A cache object is released when its shared_cpu_map
+ * is cleared (see cache_cpu_clear).
+ *
+ * A cache object is on two lists: an unsorted global list
+ * (cache_list) of cache objects; and a singly-linked list
+ * representing the local cache hierarchy, which is ordered by level
+ * (e.g. L1d -> L1i -> L2 -> L3).
+ */
+struct cache {
+	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
+	struct cpumask shared_cpu_map; /* online CPUs using this cache */
+	int type;                      /* split cache disambiguation */
+	int level;                     /* level not explicit in device tree */
+	struct list_head list;         /* global list of cache objects */
+	struct cache *next_local;      /* next cache of >= level */
+};
+
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
+
+/* traversal/modification of this list occurs only at cpu hotplug time;
+ * access is serialized by cpu hotplug locking
+ */
+static LIST_HEAD(cache_list);
+
+static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
+{
+	return container_of(k, struct cache_index_dir, kobj);
+}
+
+static const char *cache_type_string(const struct cache *cache)
+{
+	return cache_type_info[cache->type].name;
+}
+
+static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
+{
+	cache->type = type;
+	cache->level = level;
+	cache->ofnode = of_node_get(ofnode);
+	INIT_LIST_HEAD(&cache->list);
+	list_add(&cache->list, &cache_list);
+}
+
+static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
+{
+	struct cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (cache)
+		cache_init(cache, type, level, ofnode);
+
+	return cache;
+}
+
+static void release_cache_debugcheck(struct cache *cache)
+{
+	struct cache *iter;
+
+	list_for_each_entry(iter, &cache_list, list)
+		WARN_ONCE(iter->next_local == cache,
+			  "cache for %s(%s) refers to cache for %s(%s)\n",
+			  iter->ofnode->full_name,
+			  cache_type_string(iter),
+			  cache->ofnode->full_name,
+			  cache_type_string(cache));
+}
+
+static void release_cache(struct cache *cache)
+{
+	if (!cache)
+		return;
+
+	pr_debug("freeing L%d %s cache for %s\n", cache->level,
+		 cache_type_string(cache), cache->ofnode->full_name);
+
+	release_cache_debugcheck(cache);
+	list_del(&cache->list);
+	of_node_put(cache->ofnode);
+	kfree(cache);
+}
+
+static void cache_cpu_set(struct cache *cache, int cpu)
+{
+	struct cache *next = cache;
+
+	while (next) {
+		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
+			  "CPU %i already accounted in %s(%s)\n",
+			  cpu, next->ofnode->full_name,
+			  cache_type_string(next));
+		cpumask_set_cpu(cpu, &next->shared_cpu_map);
+		next = next->next_local;
+	}
+}
+
+static int cache_size(const struct cache *cache, unsigned int *ret)
+{
+	const char *propname;
+	const u32 *cache_size;
+
+	propname = cache_type_info[cache->type].size_prop;
+
+	cache_size = of_get_property(cache->ofnode, propname, NULL);
+	if (!cache_size)
+		return -ENODEV;
+
+	*ret = *cache_size;
+	return 0;
+}
+
+static int cache_size_kb(const struct cache *cache, unsigned int *ret)
+{
+	unsigned int size;
+
+	if (cache_size(cache, &size))
+		return -ENODEV;
+
+	*ret = size / 1024;
+	return 0;
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
+{
+	const u32 *line_size;
+	int i, lim;
+
+	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
+
+	for (i = 0; i < lim; i++) {
+		const char *propname;
+
+		propname = cache_type_info[cache->type].line_size_props[i];
+		line_size = of_get_property(cache->ofnode, propname, NULL);
+		if (line_size)
+			break;
+	}
+
+	if (!line_size)
+		return -ENODEV;
+
+	*ret = *line_size;
+	return 0;
+}
+
+static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
+{
+	const char *propname;
+	const u32 *nr_sets;
+
+	propname = cache_type_info[cache->type].nr_sets_prop;
+
+	nr_sets = of_get_property(cache->ofnode, propname, NULL);
+	if (!nr_sets)
+		return -ENODEV;
+
+	*ret = *nr_sets;
+	return 0;
+}
+
+static int cache_associativity(const struct cache *cache, unsigned int *ret)
+{
+	unsigned int line_size;
+	unsigned int nr_sets;
+	unsigned int size;
+
+	if (cache_nr_sets(cache, &nr_sets))
+		goto err;
+
+	/* If the cache is fully associative, there is no need to
+	 * check the other properties.
+	 */
+	if (nr_sets == 1) {
+		*ret = 0;
+		return 0;
+	}
+
+	if (cache_get_line_size(cache, &line_size))
+		goto err;
+	if (cache_size(cache, &size))
+		goto err;
+
+	if (!(nr_sets > 0 && size > 0 && line_size > 0))
+		goto err;
+
+	*ret = (size / nr_sets) / line_size;
+	return 0;
+err:
+	return -ENODEV;
+}
+
+/* helper for dealing with split caches */
+static struct cache *cache_find_first_sibling(struct cache *cache)
+{
+	struct cache *iter;
+
+	if (cache->type == CACHE_TYPE_UNIFIED)
+		return cache;
+
+	list_for_each_entry(iter, &cache_list, list)
+		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
+			return iter;
+
+	return cache;
+}
+
+/* return the first cache on a local list matching node */
+static struct cache *cache_lookup_by_node(const struct device_node *node)
+{
+	struct cache *cache = NULL;
+	struct cache *iter;
+
+	list_for_each_entry(iter, &cache_list, list) {
+		if (iter->ofnode != node)
+			continue;
+		cache = cache_find_first_sibling(iter);
+		break;
+	}
+
+	return cache;
+}
+
+static bool cache_node_is_unified(const struct device_node *np)
+{
+	return of_get_property(np, "cache-unified", NULL);
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+
+	return cache;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
+{
+	struct cache *dcache, *icache;
+
+	pr_debug("creating L%d dcache and icache for %s\n", level,
+		 node->full_name);
+
+	dcache = new_cache(CACHE_TYPE_DATA, level, node);
+	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
+
+	if (!dcache || !icache)
+		goto err;
+
+	dcache->next_local = icache;
+
+	return dcache;
+err:
+	release_cache(dcache);
+	release_cache(icache);
+	return NULL;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	if (cache_node_is_unified(node))
+		cache = cache_do_one_devnode_unified(node, level);
+	else
+		cache = cache_do_one_devnode_split(node, level);
+
+	return cache;
+}
+
+static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	cache = cache_lookup_by_node(node);
+
+	WARN_ONCE(cache && cache->level != level,
+		  "cache level mismatch on lookup (got %d, expected %d)\n",
+		  cache->level, level);
+
+	if (!cache)
+		cache = cache_do_one_devnode(node, level);
+
+	return cache;
+}
+
+static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
+{
+	while (smaller->next_local) {
+		if (smaller->next_local == bigger)
+			return; /* already linked */
+		smaller = smaller->next_local;
+	}
+
+	smaller->next_local = bigger;
+}
+
+static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
+{
+	WARN_ON_ONCE(cache->level != 1);
+	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+}
+
+static void __cpuinit do_subsidiary_caches(struct cache *cache)
+{
+	struct device_node *subcache_node;
+	int level = cache->level;
+
+	do_subsidiary_caches_debugcheck(cache);
+
+	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
+		struct cache *subcache;
+
+		level++;
+		subcache = cache_lookup_or_instantiate(subcache_node, level);
+		of_node_put(subcache_node);
+		if (!subcache)
+			break;
+
+		link_cache_lists(cache, subcache);
+		cache = subcache;
+	}
+}
+
+static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
+{
+	struct device_node *cpu_node;
+	struct cache *cpu_cache = NULL;
+
+	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
+
+	cpu_node = of_get_cpu_node(cpu_id, NULL);
+	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+	if (!cpu_node)
+		goto out;
+
+	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
+	if (!cpu_cache)
+		goto out;
+
+	do_subsidiary_caches(cpu_cache);
+
+	cache_cpu_set(cpu_cache, cpu_id);
+out:
+	of_node_put(cpu_node);
+
+	return cpu_cache;
+}
+
+static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
+{
+	struct cache_dir *cache_dir;
+	struct sys_device *sysdev;
+	struct kobject *kobj = NULL;
+
+	sysdev = get_cpu_sysdev(cpu_id);
+	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
+	if (!sysdev)
+		goto err;
+
+	kobj = kobject_create_and_add("cache", &sysdev->kobj);
+	if (!kobj)
+		goto err;
+
+	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
+	if (!cache_dir)
+		goto err;
+
+	cache_dir->kobj = kobj;
+
+	WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
+
+	per_cpu(cache_dir, cpu_id) = cache_dir;
+
+	return cache_dir;
+err:
+	kobject_put(kobj);
+	return NULL;
+}
+
+static void cache_index_release(struct kobject *kobj)
+{
+	struct cache_index_dir *index;
+
+	index = kobj_to_cache_index_dir(kobj);
+
+	pr_debug("freeing index directory for L%d %s cache\n",
+		 index->cache->level, cache_type_string(index->cache));
+
+	kfree(index);
+}
+
+static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+	struct kobj_attribute *kobj_attr;
+
+	kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+	return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static struct cache *index_kobj_to_cache(struct kobject *k)
+{
+	struct cache_index_dir *index;
+
+	index = kobj_to_cache_index_dir(k);
+
+	return index->cache;
+}
+
+static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int size_kb;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_size_kb(cache, &size_kb))
+		return -ENODEV;
+
+	return sprintf(buf, "%uK\n", size_kb);
+}
+
+static struct kobj_attribute cache_size_attr =
+	__ATTR(size, 0444, size_show, NULL);
+
+
+static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int line_size;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_get_line_size(cache, &line_size))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", line_size);
+}
+
+static struct kobj_attribute cache_line_size_attr =
+	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
+
+static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int nr_sets;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_nr_sets(cache, &nr_sets))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", nr_sets);
+}
+
+static struct kobj_attribute cache_nr_sets_attr =
+	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
+
+static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int associativity;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_associativity(cache, &associativity))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", associativity);
+}
+
+static struct kobj_attribute cache_assoc_attr =
+	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
+
+static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	return sprintf(buf, "%s\n", cache_type_string(cache));
+}
+
+static struct kobj_attribute cache_type_attr =
+	__ATTR(type, 0444, type_show, NULL);
+
+static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_index_dir *index;
+	struct cache *cache;
+
+	index = kobj_to_cache_index_dir(k);
+	cache = index->cache;
+
+	return sprintf(buf, "%d\n", cache->level);
+}
+
+static struct kobj_attribute cache_level_attr =
+	__ATTR(level, 0444, level_show, NULL);
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_index_dir *index;
+	struct cache *cache;
+	int len;
+	int n = 0;
+
+	index = kobj_to_cache_index_dir(k);
+	cache = index->cache;
+	len = PAGE_SIZE - 2;
+
+	if (len > 1) {
+		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+
+static struct kobj_attribute cache_shared_cpu_map_attr =
+	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+
+/* Attributes which should always be created -- the kobject/sysfs core
+ * does this automatically via kobj_type->default_attrs.  This is the
+ * minimum data required to uniquely identify a cache.
+ */
+static struct attribute *cache_index_default_attrs[] = {
+	&cache_type_attr.attr,
+	&cache_level_attr.attr,
+	&cache_shared_cpu_map_attr.attr,
+	NULL,
+};
+
+/* Attributes which should be created if the cache device node has the
+ * right properties -- see cacheinfo_create_index_opt_attrs
+ */
+static struct kobj_attribute *cache_index_opt_attrs[] = {
+	&cache_size_attr,
+	&cache_line_size_attr,
+	&cache_nr_sets_attr,
+	&cache_assoc_attr,
+};
+
+static struct sysfs_ops cache_index_ops = {
+	.show = cache_index_show,
+};
+
+static struct kobj_type cache_index_type = {
+	.release = cache_index_release,
+	.sysfs_ops = &cache_index_ops,
+	.default_attrs = cache_index_default_attrs,
+};
+
+static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
+{
+	const char *cache_name;
+	const char *cache_type;
+	struct cache *cache;
+	char *buf;
+	int i;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	cache = dir->cache;
+	cache_name = cache->ofnode->full_name;
+	cache_type = cache_type_string(cache);
+
+	/* We don't want to create an attribute that can't provide a
+	 * meaningful value.  Check the return value of each optional
+	 * attribute's ->show method before registering the
+	 * attribute.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
+		struct kobj_attribute *attr;
+		ssize_t rc;
+
+		attr = cache_index_opt_attrs[i];
+
+		rc = attr->show(&dir->kobj, attr, buf);
+		if (rc <= 0) {
+			pr_debug("not creating %s attribute for "
+				 "%s(%s) (rc = %zd)\n",
+				 attr->attr.name, cache_name,
+				 cache_type, rc);
+			continue;
+		}
+		if (sysfs_create_file(&dir->kobj, &attr->attr))
+			pr_debug("could not create %s attribute for %s(%s)\n",
+				 attr->attr.name, cache_name, cache_type);
+	}
+
+	kfree(buf);
+}
+
+static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
+{
+	struct cache_index_dir *index_dir;
+	int rc;
+
+	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
+	if (!index_dir)
+		goto err;
+
+	index_dir->cache = cache;
+
+	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
+				  cache_dir->kobj, "index%d", index);
+	if (rc)
+		goto err;
+
+	index_dir->next = cache_dir->index;
+	cache_dir->index = index_dir;
+
+	cacheinfo_create_index_opt_attrs(index_dir);
+
+	return;
+err:
+	kfree(index_dir);
+}
+
+static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
+{
+	struct cache_dir *cache_dir;
+	struct cache *cache;
+	int index = 0;
+
+	cache_dir = cacheinfo_create_cache_dir(cpu_id);
+	if (!cache_dir)
+		return;
+
+	cache = cache_list;
+	while (cache) {
+		cacheinfo_create_index_dir(cache, index, cache_dir);
+		index++;
+		cache = cache->next_local;
+	}
+}
+
+void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
+{
+	struct cache *cache;
+
+	cache = cache_chain_instantiate(cpu_id);
+	if (!cache)
+		return;
+
+	cacheinfo_sysfs_populate(cpu_id, cache);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
+
+static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
+{
+	struct device_node *cpu_node;
+	struct cache *cache;
+
+	cpu_node = of_get_cpu_node(cpu_id, NULL);
+	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+	if (!cpu_node)
+		return NULL;
+
+	cache = cache_lookup_by_node(cpu_node);
+	of_node_put(cpu_node);
+
+	return cache;
+}
+
+static void remove_index_dirs(struct cache_dir *cache_dir)
+{
+	struct cache_index_dir *index;
+
+	index = cache_dir->index;
+
+	while (index) {
+		struct cache_index_dir *next;
+
+		next = index->next;
+		kobject_put(&index->kobj);
+		index = next;
+	}
+}
+
+static void remove_cache_dir(struct cache_dir *cache_dir)
+{
+	remove_index_dirs(cache_dir);
+
+	kobject_put(cache_dir->kobj);
+
+	kfree(cache_dir);
+}
+
+static void cache_cpu_clear(struct cache *cache, int cpu)
+{
+	while (cache) {
+		struct cache *next = cache->next_local;
+
+		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
+			  "CPU %i not accounted in %s(%s)\n",
+			  cpu, cache->ofnode->full_name,
+			  cache_type_string(cache));
+
+		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
+
+		/* Release the cache object if all the cpus using it
+		 * are offline */
+		if (cpumask_empty(&cache->shared_cpu_map))
+			release_cache(cache);
+
+		cache = next;
+	}
+}
+
+void cacheinfo_cpu_offline(unsigned int cpu_id)
+{
+	struct cache_dir *cache_dir;
+	struct cache *cache;
+
+	/* Prevent userspace from seeing inconsistent state - remove
+	 * the sysfs hierarchy first */
+	cache_dir = per_cpu(cache_dir, cpu_id);
+
+	/* careful, sysfs population may have failed */
+	if (cache_dir)
+		remove_cache_dir(cache_dir);
+
+	per_cpu(cache_dir, cpu_id) = NULL;
+
+	/* clear the CPU's bit in its cache chain, possibly freeing
+	 * cache objects */
+	cache = cache_lookup_by_cpu(cpu_id);
+	if (cache)
+		cache_cpu_clear(cache, cpu_id);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
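
[Illustration, not part of the patch.] The net effect of cacheinfo.c is a per-cpu hierarchy under /sys/devices/system/cpu/cpuN/cache/indexM/, where type, level, and shared_cpu_map are always present and the geometry attributes appear only when the device tree supplies the matching properties. A minimal userspace sketch of a reader follows; it assumes CPU 0 is online and has at least one index directory, which depends on the machine:

#include <stdio.h>

static void print_attr(int cpu, int index, const char *attr)
{
	char path[128], val[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cache/index%d/%s",
		 cpu, index, attr);
	f = fopen(path, "r");
	if (!f)
		return;	/* optional attrs may be absent, see
			 * cacheinfo_create_index_opt_attrs() */
	if (fgets(val, sizeof(val), f))
		printf("%-24s %s", attr, val);
	fclose(f);
}

int main(void)
{
	const char *attrs[] = { "type", "level", "size",
				"coherency_line_size", "number_of_sets",
				"ways_of_associativity", "shared_cpu_map" };
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		print_attr(0, 0, attrs[i]);
	return 0;
}
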
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
new file mode 100644
index 000000000000..a7b74d36acd7
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -0,0 +1,8 @@
+#ifndef _PPC_CACHEINFO_H
+#define _PPC_CACHEINFO_H
+
+/* These are just hooks for sysfs.c to use. */
+extern void cacheinfo_cpu_online(unsigned int cpu_id);
+extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+
+#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2538030954d8..da5a3855a0c4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,7 +16,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
+#define DEBUG
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -1356,6 +1356,63 @@ static void __init pcibios_allocate_resources(int pass)
 	}
 }
 
+static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	resource_size_t	offset;
+	struct resource *res, *pres;
+	int i;
+
+	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
+
+	/* Check for IO */
+	if (!(hose->io_resource.flags & IORESOURCE_IO))
+		goto no_io;
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(res == NULL);
+	res->name = "Legacy IO";
+	res->flags = IORESOURCE_IO;
+	res->start = offset;
+	res->end = (offset + 0xfff) & 0xfffffffful;
+	pr_debug("Candidate legacy IO: %pR\n", res);
+	if (request_resource(&hose->io_resource, res)) {
+		printk(KERN_DEBUG
+		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
+		       pci_domain_nr(bus), bus->number, res);
+		kfree(res);
+	}
+
+ no_io:
+	/* Check for memory */
+	offset = hose->pci_mem_offset;
+	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
+	for (i = 0; i < 3; i++) {
+		pres = &hose->mem_resources[i];
+		if (!(pres->flags & IORESOURCE_MEM))
+			continue;
+		pr_debug("hose mem res: %pR\n", pres);
+		if ((pres->start - offset) <= 0xa0000 &&
+		    (pres->end - offset) >= 0xbffff)
+			break;
+	}
+	if (i >= 3)
+		return;
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(res == NULL);
+	res->name = "Legacy VGA memory";
+	res->flags = IORESOURCE_MEM;
+	res->start = 0xa0000 + offset;
+	res->end = 0xbffff + offset;
+	pr_debug("Candidate VGA memory: %pR\n", res);
+	if (request_resource(pres, res)) {
+		printk(KERN_DEBUG
+		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
+		       pci_domain_nr(bus), bus->number, res);
+		kfree(res);
+	}
+}
+
 void __init pcibios_resource_survey(void)
 {
 	struct pci_bus *b;
@@ -1371,6 +1428,18 @@ void __init pcibios_resource_survey(void)
 		pcibios_allocate_resources(1);
 	}
 
+	/* Before we start assigning unassigned resource, we try to reserve
+	 * the low IO area and the VGA memory area if they intersect the
+	 * bus available resources to avoid allocating things on top of them
+	 */
+	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+		list_for_each_entry(b, &pci_root_buses, node)
+			pcibios_reserve_legacy_regions(b);
+	}
+
+	/* Now, if the platform didn't decide to blindly trust the firmware,
+	 * we proceed to assigning things that were left unassigned
+	 */
 	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
 		pr_debug("PCI: Assigning unassigned resouces...\n");
 		pci_assign_unassigned_resources();
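
[Illustration, not part of the patch.] pcibios_reserve_legacy_regions() inserts its "Legacy IO" and "Legacy VGA memory" placeholders as children of the hose resources, so the later pci_assign_unassigned_resources() pass cannot allocate a BAR on top of the legacy ranges, and request_resource() fails harmlessly when firmware already put something there. Once inserted, the reservations are visible in the resource tree; a small userspace check could look like this:

#include <stdio.h>
#include <string.h>

static void scan(const char *file, const char *name)
{
	FILE *f = fopen(file, "r");
	char line[256];

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, name))
			fputs(line, stdout);	/* print matching reservations */
	fclose(f);
}

int main(void)
{
	scan("/proc/ioports", "Legacy IO");
	scan("/proc/iomem", "Legacy VGA memory");
	return 0;
}
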
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 39fadc6e1492..586962f65c2a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -560,9 +560,14 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
 	 * G5 machines... So when something asks for bus 0 io base
 	 * (bus 0 is HT root), we return the AGP one instead.
 	 */
-	if (machine_is_compatible("MacRISC4"))
-		if (in_bus == 0)
+	if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
+		struct device_node *agp;
+
+		agp = of_find_compatible_node(NULL, NULL, "u3-agp");
+		if (agp)
 			in_bus = 0xf0;
+		of_node_put(agp);
+	}
 
 	/* That syscall isn't quite compatible with PCI domains, but it's
 	 * used on pre-domains setup. We return the first match
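
[Illustration, not part of the patch.] Besides redirecting bus 0 only when a U3 AGP bridge actually exists, the rewritten test follows the standard OF refcounting pattern: of_find_compatible_node() returns its result with an elevated refcount, and of_node_put(NULL) is a no-op, so the put can be issued unconditionally. Sketched generically (use_node() is a hypothetical consumer):

/* every successful of_find_*() lookup must be balanced by of_node_put();
 * passing NULL is harmless, so the failure path needs no special casing
 */
struct device_node *np;

np = of_find_compatible_node(NULL, NULL, "u3-agp");
if (np)
	use_node(np);		/* hypothetical consumer of the node */
of_node_put(np);
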
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index dcec1325d340..c8b27bb4dbde 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -165,6 +165,7 @@ EXPORT_SYMBOL(timer_interrupt);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(tb_ticks_per_jiffy);
 EXPORT_SYMBOL(cacheable_memcpy);
+EXPORT_SYMBOL(cacheable_memzero);
 #endif
 
 #ifdef CONFIG_PPC32
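
[Illustration, not part of the patch.] The export makes the optimized dcbz-based zeroing helper in arch/powerpc/lib usable from modules, alongside the already-exported cacheable_memcpy. A hedged sketch of a module caller (the prototype is repeated locally for clarity; in-tree code would pick it up from the powerpc asm headers, and the 4096-byte buffer is arbitrary):

#include <linux/module.h>
#include <linux/slab.h>

extern void cacheable_memzero(void *p, unsigned int nb);

static int __init czdemo_init(void)
{
	void *buf = kmalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	cacheable_memzero(buf, 4096);	/* behaves like memset(buf, 0, 4096) */
	kfree(buf);
	return 0;
}

static void __exit czdemo_exit(void)
{
}

module_init(czdemo_init);
module_exit(czdemo_exit);
MODULE_LICENSE("GPL");
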
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6f73c739f1e2..c09cffafb6ee 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -824,11 +824,11 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 #endif
 
 #ifdef CONFIG_KEXEC
-	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
 	if (lprop)
 		crashk_res.start = *lprop;
 
-	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
 	if (lprop)
 		crashk_res.end = crashk_res.start + *lprop - 1;
 #endif
@@ -893,12 +893,12 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 	u64 base, size, lmb_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
-	ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
 		return 0;
 	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
-	dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
 	if (dm == NULL || l < sizeof(cell_t))
 		return 0;
 
@@ -907,7 +907,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 		return 0;
 
 	/* check if this is a kexec/kdump kernel. */
-	usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
+	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
 						 &l);
 	if (usm != NULL)
 		is_kexec_kdump = 1;
@@ -981,9 +981,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 	} else if (strcmp(type, "memory") != 0)
 		return 0;
 
-	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
 	if (reg == NULL)
-		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
+		reg = of_get_flat_dt_prop(node, "reg", &l);
 	if (reg == NULL)
 		return 0;
 
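
[Illustration, not part of the patch.] These hunks drop casts that are redundant because of_get_flat_dt_prop() returns void *, and C (unlike C++) converts void * to any object pointer type implicitly. A standalone sketch, with get_prop() standing in for the flat-device-tree accessor and the property value invented for the example:

#include <stdio.h>

static void *get_prop(void)	/* stand-in for of_get_flat_dt_prop() */
{
	static unsigned long long v = 0x2000000;	/* made-up value */
	return &v;
}

int main(void)
{
	unsigned long long *lprop = get_prop();	/* no cast needed in C */

	printf("crashkernel-base: 0x%llx\n", *lprop);
	return 0;
}
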
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2445945d3761..7f1b33d5e30d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1210,7 +1210,7 @@ static void __init prom_initialize_tce_table(void)
 	/* Initialize the table to have a one-to-one mapping
 	 * over the allocated size.
 	 */
-	tce_entryp = (unsigned long *)base;
+	tce_entryp = (u64 *)base;
 	for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
 		tce_entry = (i << PAGE_SHIFT);
 		tce_entry |= 0x3;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0c64f10087b9..4a2ee08af6a7 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -18,6 +18,8 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 
+#include "cacheinfo.h"
+
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #include <asm/lppaca.h>
@@ -25,8 +27,6 @@
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
-
 /*
  * SMT snooze delay stuff, 64-bit only for now
  */
@@ -343,283 +343,6 @@ static struct sysdev_attribute pa6t_attrs[] = {
 #endif /* HAS_PPC_PMC_PA6T */
 #endif /* HAS_PPC_PMC_CLASSIC */
 
-struct cache_desc {
-	struct kobject kobj;
-	struct cache_desc *next;
-	const char *type;	/* Instruction, Data, or Unified */
-	u32 size;		/* total cache size in KB */
-	u32 line_size;		/* in bytes */
-	u32 nr_sets;		/* number of sets */
-	u32 level;		/* e.g. 1, 2, 3... */
-	u32 associativity;	/* e.g. 8-way... 0 is fully associative */
-};
-
-DEFINE_PER_CPU(struct cache_desc *, cache_desc);
-
-static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
-{
-	return container_of(k, struct cache_desc, kobj);
-}
-
-static void cache_desc_release(struct kobject *k)
-{
-	struct cache_desc *desc = kobj_to_cache_desc(k);
-
-	pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
-
-	if (desc->next)
-		kobject_put(&desc->next->kobj);
-
-	kfree(kobj_to_cache_desc(k));
-}
-
-static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
-{
-	struct kobj_attribute *kobj_attr;
-
-	kobj_attr = container_of(attr, struct kobj_attribute, attr);
-
-	return kobj_attr->show(k, kobj_attr, buf);
-}
-
-static struct sysfs_ops cache_desc_sysfs_ops = {
-	.show = cache_desc_show,
-};
-
-static struct kobj_type cache_desc_type = {
-	.release = cache_desc_release,
-	.sysfs_ops = &cache_desc_sysfs_ops,
-};
-
-static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%uK\n", cache->size);
-}
-
-static struct kobj_attribute cache_size_attr =
-	__ATTR(size, 0444, cache_size_show, NULL);
-
-static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->line_size);
-}
-
-static struct kobj_attribute cache_line_size_attr =
-	__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
-
-static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->nr_sets);
-}
-
-static struct kobj_attribute cache_nr_sets_attr =
-	__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
-
-static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%s\n", cache->type);
-}
-
-static struct kobj_attribute cache_type_attr =
-	__ATTR(type, 0444, cache_type_show, NULL);
-
-static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->level);
-}
-
-static struct kobj_attribute cache_level_attr =
-	__ATTR(level, 0444, cache_level_show, NULL);
-
-static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->associativity);
-}
-
-static struct kobj_attribute cache_assoc_attr =
-	__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
-
-struct cache_desc_info {
-	const char *type;
-	const char *size_prop;
-	const char *line_size_prop;
-	const char *nr_sets_prop;
-};
-
-/* PowerPC Processor binding says the [di]-cache-* must be equal on
- * unified caches, so just use d-cache properties. */
-static struct cache_desc_info ucache_info = {
-	.type = "Unified",
-	.size_prop = "d-cache-size",
-	.line_size_prop = "d-cache-line-size",
-	.nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info dcache_info = {
-	.type = "Data",
-	.size_prop = "d-cache-size",
-	.line_size_prop = "d-cache-line-size",
-	.nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info icache_info = {
-	.type = "Instruction",
-	.size_prop = "i-cache-size",
-	.line_size_prop = "i-cache-line-size",
-	.nr_sets_prop = "i-cache-sets",
-};
-
-static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
-{
-	const u32 *cache_line_size;
-	struct cache_desc *new;
-	const u32 *cache_size;
-	const u32 *nr_sets;
-	int rc;
-
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
-	if (!new)
-		return NULL;
-
-	rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
-				  "index%d", index);
-	if (rc)
-		goto err;
-
-	/* type */
-	new->type = info->type;
-	rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
-	WARN_ON(rc);
-
-	/* level */
-	new->level = level;
-	rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
-	WARN_ON(rc);
-
-	/* size */
-	cache_size = of_get_property(np, info->size_prop, NULL);
-	if (cache_size) {
-		new->size = *cache_size / 1024;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_size_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* coherency_line_size */
-	cache_line_size = of_get_property(np, info->line_size_prop, NULL);
-	if (cache_line_size) {
-		new->line_size = *cache_line_size;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_line_size_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* number_of_sets */
-	nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
-	if (nr_sets) {
-		new->nr_sets = *nr_sets;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_nr_sets_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* ways_of_associativity */
-	if (new->nr_sets == 1) {
-		/* fully associative */
-		new->associativity = 0;
-		goto create_assoc;
-	}
-
-	if (new->nr_sets && new->size && new->line_size) {
-		/* If we have values for all of these we can derive
-		 * the associativity. */
-		new->associativity =
-			((new->size * 1024) / new->nr_sets) / new->line_size;
-create_assoc:
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_assoc_attr.attr);
-		WARN_ON(rc);
-	}
-
-	return new;
-err:
-	kfree(new);
-	return NULL;
-}
-
-static bool cache_is_unified(struct device_node *np)
-{
-	return of_get_property(np, "cache-unified", NULL);
-}
-
-static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
-{
-	struct device_node *next_cache;
-	struct cache_desc *new, **end;
-
-	pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
-
-	if (cache_is_unified(np)) {
-		new = create_cache_desc(np, parent, index, level,
-					&ucache_info);
-	} else {
-		new = create_cache_desc(np, parent, index, level,
-					&dcache_info);
-		if (new) {
-			index++;
-			new->next = create_cache_desc(np, parent, index, level,
-						      &icache_info);
-		}
-	}
-	if (!new)
-		return NULL;
-
-	end = &new->next;
-	while (*end)
-		end = &(*end)->next;
-
-	next_cache = of_find_next_cache_node(np);
-	if (!next_cache)
-		goto out;
-
-	*end = create_cache_index_info(next_cache, parent, ++index, ++level);
-
-	of_node_put(next_cache);
-out:
-	return new;
-}
-
-static void __cpuinit create_cache_info(struct sys_device *sysdev)
-{
-	struct kobject *cache_toplevel;
-	struct device_node *np = NULL;
-	int cpu = sysdev->id;
-
-	cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
-	if (!cache_toplevel)
-		return;
-	per_cpu(cache_toplevel, cpu) = cache_toplevel;
-	np = of_get_cpu_node(cpu, NULL);
-	if (np != NULL) {
-		per_cpu(cache_desc, cpu) =
-			create_cache_index_info(np, cache_toplevel, 0, 1);
-		of_node_put(np);
-	}
-	return;
-}
-
 static void __cpuinit register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -684,25 +407,10 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
 	sysdev_create_file(s, &attr_dscr);
 #endif /* CONFIG_PPC64 */
 
-	create_cache_info(s);
+	cacheinfo_cpu_online(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void remove_cache_info(struct sys_device *sysdev)
-{
-	struct kobject *cache_toplevel;
-	struct cache_desc *cache_desc;
-	int cpu = sysdev->id;
-
-	cache_desc = per_cpu(cache_desc, cpu);
-	if (cache_desc != NULL)
-		kobject_put(&cache_desc->kobj);
-
-	cache_toplevel = per_cpu(cache_toplevel, cpu);
-	if (cache_toplevel != NULL)
-		kobject_put(cache_toplevel);
-}
-
 static void unregister_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -769,7 +477,7 @@ static void unregister_cpu_online(unsigned int cpu)
 	sysdev_remove_file(s, &attr_dscr);
 #endif /* CONFIG_PPC64 */
 
-	remove_cache_info(s);
+	cacheinfo_cpu_offline(cpu);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 