Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 715
1 file changed, 198 insertions(+), 517 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 659643376dbf..edcb0e28c336 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -7,16 +7,14 @@
  * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
  */

-#include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/compiler.h>
+#include <linux/cacheinfo.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/sysfs.h>
 #include <linux/pci.h>

 #include <asm/processor.h>
-#include <linux/smp.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>

@@ -116,10 +114,10 @@ static const struct _cache_table cache_table[] =


 enum _cache_type {
-        CACHE_TYPE_NULL = 0,
-        CACHE_TYPE_DATA = 1,
-        CACHE_TYPE_INST = 2,
-        CACHE_TYPE_UNIFIED = 3
+        CTYPE_NULL = 0,
+        CTYPE_DATA = 1,
+        CTYPE_INST = 2,
+        CTYPE_UNIFIED = 3
 };

 union _cpuid4_leaf_eax {
@@ -159,11 +157,6 @@ struct _cpuid4_info_regs {
         struct amd_northbridge *nb;
 };

-struct _cpuid4_info {
-        struct _cpuid4_info_regs base;
-        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
-};
-
 unsigned short num_cache_leaves;

 /* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -220,6 +213,13 @@ static const unsigned short assocs[] = {
 static const unsigned char levels[] = { 1, 1, 2, 3 };
 static const unsigned char types[] = { 1, 2, 3, 3 };

+static const enum cache_type cache_type_map[] = {
+        [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
+        [CTYPE_DATA] = CACHE_TYPE_DATA,
+        [CTYPE_INST] = CACHE_TYPE_INST,
+        [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
+
 static void
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
            union _cpuid4_leaf_ebx *ebx,
@@ -291,14 +291,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                 (ebx->split.ways_of_associativity + 1) - 1;
 }

-struct _cache_attr {
-        struct attribute attr;
-        ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
-        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
-                         unsigned int);
-};
-
 #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
+
 /*
  * L3 cache descriptors
  */
@@ -325,20 +319,6 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
         l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }

-static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
-{
-        int node;
-
-        /* only for L3, and not in virtualized environments */
-        if (index < 3)
-                return;
-
-        node = amd_get_nb_id(smp_processor_id());
-        this_leaf->nb = node_to_amd_nb(node);
-        if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
-                amd_calc_l3_indices(this_leaf->nb);
-}
-
 /*
  * check whether a slot used for disabling an L3 index is occupied.
  * @l3: L3 cache descriptor
@@ -359,15 +339,13 @@ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
         return -1;
 }

-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
                                   unsigned int slot)
 {
         int index;
+        struct amd_northbridge *nb = this_leaf->priv;

-        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-                return -EINVAL;
-
-        index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
+        index = amd_get_l3_disable_slot(nb, slot);
         if (index >= 0)
                 return sprintf(buf, "%d\n", index);

@@ -376,9 +354,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,

 #define SHOW_CACHE_DISABLE(slot) \
 static ssize_t \
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
-                          unsigned int cpu) \
+cache_disable_##slot##_show(struct device *dev, \
+                            struct device_attribute *attr, char *buf) \
 { \
+        struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
         return show_cache_disable(this_leaf, buf, slot); \
 }
 SHOW_CACHE_DISABLE(0)
@@ -446,25 +425,23 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
         return 0;
 }

-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
                                    const char *buf, size_t count,
                                    unsigned int slot)
 {
         unsigned long val = 0;
         int cpu, err = 0;
+        struct amd_northbridge *nb = this_leaf->priv;

         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;

-        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-                return -EINVAL;
-
-        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+        cpu = cpumask_first(&this_leaf->shared_cpu_map);

         if (kstrtoul(buf, 10, &val) < 0)
                 return -EINVAL;

-        err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
+        err = amd_set_l3_disable_slot(nb, cpu, slot, val);
         if (err) {
                 if (err == -EEXIST)
                         pr_warning("L3 slot %d in use/index already disabled!\n",
@@ -476,41 +453,36 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,

 #define STORE_CACHE_DISABLE(slot) \
 static ssize_t \
-store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
-                           const char *buf, size_t count, \
-                           unsigned int cpu) \
+cache_disable_##slot##_store(struct device *dev, \
+                             struct device_attribute *attr, \
+                             const char *buf, size_t count) \
 { \
+        struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
         return store_cache_disable(this_leaf, buf, count, slot); \
 }
 STORE_CACHE_DISABLE(0)
 STORE_CACHE_DISABLE(1)

-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
-                show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
-                show_cache_disable_1, store_cache_disable_1);
-
-static ssize_t
-show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+static ssize_t subcaches_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
 {
-        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-                return -EINVAL;
+        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+        int cpu = cpumask_first(&this_leaf->shared_cpu_map);

         return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
 }

-static ssize_t
-store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
-                unsigned int cpu)
+static ssize_t subcaches_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
 {
+        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+        int cpu = cpumask_first(&this_leaf->shared_cpu_map);
         unsigned long val;

         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;

-        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-                return -EINVAL;
-
         if (kstrtoul(buf, 16, &val) < 0)
                 return -EINVAL;

@@ -520,9 +492,92 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
         return count;
 }

-static struct _cache_attr subcaches =
-        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+static DEVICE_ATTR_RW(cache_disable_0);
+static DEVICE_ATTR_RW(cache_disable_1);
+static DEVICE_ATTR_RW(subcaches);
+
+static umode_t
+cache_private_attrs_is_visible(struct kobject *kobj,
+                               struct attribute *attr, int unused)
+{
+        struct device *dev = kobj_to_dev(kobj);
+        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+        umode_t mode = attr->mode;
+
+        if (!this_leaf->priv)
+                return 0;
+
+        if ((attr == &dev_attr_subcaches.attr) &&
+            amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+                return mode;
+
+        if ((attr == &dev_attr_cache_disable_0.attr ||
+             attr == &dev_attr_cache_disable_1.attr) &&
+            amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+                return mode;
+
+        return 0;
+}
+
+static struct attribute_group cache_private_group = {
+        .is_visible = cache_private_attrs_is_visible,
+};
+
+static void init_amd_l3_attrs(void)
+{
+        int n = 1;
+        static struct attribute **amd_l3_attrs;
+
+        if (amd_l3_attrs) /* already initialized */
+                return;
+
+        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+                n += 2;
+        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+                n += 1;
+
+        amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
+        if (!amd_l3_attrs)
+                return;
+
+        n = 0;
+        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+                amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
+                amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
+        }
+        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+                amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

+        cache_private_group.attrs = amd_l3_attrs;
+}
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+        struct amd_northbridge *nb = this_leaf->priv;
+
+        if (this_leaf->level < 3 || !nb)
+                return NULL;
+
+        if (nb && nb->l3_cache.indices)
+                init_amd_l3_attrs();
+
+        return &cache_private_group;
+}
+
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+{
+        int node;
+
+        /* only for L3, and not in virtualized environments */
+        if (index < 3)
+                return;
+
+        node = amd_get_nb_id(smp_processor_id());
+        this_leaf->nb = node_to_amd_nb(node);
+        if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
+                amd_calc_l3_indices(this_leaf->nb);
+}
 #else
 #define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
@@ -546,7 +601,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
                 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
         }

-        if (eax.split.type == CACHE_TYPE_NULL)
+        if (eax.split.type == CTYPE_NULL)
                 return -EIO; /* better error ? */

         this_leaf->eax = eax;
@@ -575,7 +630,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
                 /* Do cpuid(op) loop to find out num_cache_leaves */
                 cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
                 cache_eax.full = eax;
-        } while (cache_eax.split.type != CACHE_TYPE_NULL);
+        } while (cache_eax.split.type != CTYPE_NULL);
         return i;
 }

@@ -626,9 +681,9 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)

                 switch (this_leaf.eax.split.level) {
                 case 1:
-                        if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
+                        if (this_leaf.eax.split.type == CTYPE_DATA)
                                 new_l1d = this_leaf.size/1024;
-                        else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
+                        else if (this_leaf.eax.split.type == CTYPE_INST)
                                 new_l1i = this_leaf.size/1024;
                         break;
                 case 2:
@@ -747,55 +802,52 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
         return l2;
 }

-#ifdef CONFIG_SYSFS
-
-/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
-
-#ifdef CONFIG_SMP
-
-static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+                                    struct _cpuid4_info_regs *base)
 {
-        struct _cpuid4_info *this_leaf;
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        struct cacheinfo *this_leaf;
         int i, sibling;

         if (cpu_has_topoext) {
                 unsigned int apicid, nshared, first, last;

-                if (!per_cpu(ici_cpuid4_info, cpu))
-                        return 0;
-
-                this_leaf = CPUID4_INFO_IDX(cpu, index);
-                nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
+                this_leaf = this_cpu_ci->info_list + index;
+                nshared = base->eax.split.num_threads_sharing + 1;
                 apicid = cpu_data(cpu).apicid;
                 first = apicid - (apicid % nshared);
                 last = first + nshared - 1;

                 for_each_online_cpu(i) {
+                        this_cpu_ci = get_cpu_cacheinfo(i);
+                        if (!this_cpu_ci->info_list)
+                                continue;
+
                         apicid = cpu_data(i).apicid;
                         if ((apicid < first) || (apicid > last))
                                 continue;
-                        if (!per_cpu(ici_cpuid4_info, i))
-                                continue;
-                        this_leaf = CPUID4_INFO_IDX(i, index);
+
+                        this_leaf = this_cpu_ci->info_list + index;

                         for_each_online_cpu(sibling) {
                                 apicid = cpu_data(sibling).apicid;
                                 if ((apicid < first) || (apicid > last))
                                         continue;
-                                set_bit(sibling, this_leaf->shared_cpu_map);
+                                cpumask_set_cpu(sibling,
+                                                &this_leaf->shared_cpu_map);
                         }
                 }
         } else if (index == 3) {
                 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
-                        if (!per_cpu(ici_cpuid4_info, i))
+                        this_cpu_ci = get_cpu_cacheinfo(i);
+                        if (!this_cpu_ci->info_list)
                                 continue;
-                        this_leaf = CPUID4_INFO_IDX(i, index);
+                        this_leaf = this_cpu_ci->info_list + index;
                         for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                 if (!cpu_online(sibling))
                                         continue;
-                                set_bit(sibling, this_leaf->shared_cpu_map);
+                                cpumask_set_cpu(sibling,
+                                                &this_leaf->shared_cpu_map);
                         }
                 }
         } else
@@ -804,457 +856,86 @@ static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
         return 1;
 }

-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void __cache_cpumap_setup(unsigned int cpu, int index,
+                                 struct _cpuid4_info_regs *base)
 {
-        struct _cpuid4_info *this_leaf, *sibling_leaf;
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        struct cacheinfo *this_leaf, *sibling_leaf;
         unsigned long num_threads_sharing;
         int index_msb, i;
         struct cpuinfo_x86 *c = &cpu_data(cpu);

         if (c->x86_vendor == X86_VENDOR_AMD) {
-                if (cache_shared_amd_cpu_map_setup(cpu, index))
+                if (__cache_amd_cpumap_setup(cpu, index, base))
                         return;
         }

-        this_leaf = CPUID4_INFO_IDX(cpu, index);
-        num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+        this_leaf = this_cpu_ci->info_list + index;
+        num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

+        cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
         if (num_threads_sharing == 1)
-                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
-        else {
-                index_msb = get_count_order(num_threads_sharing);
-
-                for_each_online_cpu(i) {
-                        if (cpu_data(i).apicid >> index_msb ==
-                            c->apicid >> index_msb) {
-                                cpumask_set_cpu(i,
-                                        to_cpumask(this_leaf->shared_cpu_map));
-                                if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
-                                        sibling_leaf =
-                                                CPUID4_INFO_IDX(i, index);
-                                        cpumask_set_cpu(cpu, to_cpumask(
-                                                sibling_leaf->shared_cpu_map));
-                                }
-                        }
-                }
-        }
-}
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
-        struct _cpuid4_info *this_leaf, *sibling_leaf;
-        int sibling;
-
-        this_leaf = CPUID4_INFO_IDX(cpu, index);
-        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
-                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-                cpumask_clear_cpu(cpu,
-                                  to_cpumask(sibling_leaf->shared_cpu_map));
-        }
-}
-#else
-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
-{
-}
-
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
-}
-#endif
-
-static void free_cache_attributes(unsigned int cpu)
-{
-        int i;
-
-        for (i = 0; i < num_cache_leaves; i++)
-                cache_remove_shared_cpu_map(cpu, i);
-
-        kfree(per_cpu(ici_cpuid4_info, cpu));
-        per_cpu(ici_cpuid4_info, cpu) = NULL;
-}
-
-static void get_cpu_leaves(void *_retval)
-{
-        int j, *retval = _retval, cpu = smp_processor_id();
+                return;

-        /* Do cpuid and store the results */
-        for (j = 0; j < num_cache_leaves; j++) {
-                struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
+        index_msb = get_count_order(num_threads_sharing);

-                *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
-                if (unlikely(*retval < 0)) {
-                        int i;
+        for_each_online_cpu(i)
+                if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
+                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

-                        for (i = 0; i < j; i++)
-                                cache_remove_shared_cpu_map(cpu, i);
-                        break;
+                        if (i == cpu || !sib_cpu_ci->info_list)
+                                continue;/* skip if itself or no cacheinfo */
+                        sibling_leaf = sib_cpu_ci->info_list + index;
+                        cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+                        cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
                 }
-                cache_shared_cpu_map_setup(cpu, j);
-        }
 }

-static int detect_cache_attributes(unsigned int cpu)
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+                         struct _cpuid4_info_regs *base)
 {
-        int retval;
-
-        if (num_cache_leaves == 0)
-                return -ENOENT;
-
-        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
-            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
-                return -ENOMEM;
-
-        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
-        if (retval) {
-                kfree(per_cpu(ici_cpuid4_info, cpu));
-                per_cpu(ici_cpuid4_info, cpu) = NULL;
-        }
-
-        return retval;
+        this_leaf->level = base->eax.split.level;
+        this_leaf->type = cache_type_map[base->eax.split.type];
+        this_leaf->coherency_line_size =
+                                base->ebx.split.coherency_line_size + 1;
+        this_leaf->ways_of_associativity =
+                                base->ebx.split.ways_of_associativity + 1;
+        this_leaf->size = base->size;
+        this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
+        this_leaf->physical_line_partition =
+                                base->ebx.split.physical_line_partition + 1;
+        this_leaf->priv = base->nb;
 }

-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/cpu.h>
-
-/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
-
-struct _index_kobject {
-        struct kobject kobj;
-        unsigned int cpu;
-        unsigned short index;
-};
-
-/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
-
-#define show_one_plus(file_name, object, val) \
-static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
-                                unsigned int cpu) \
-{ \
-        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
-}
-
-show_one_plus(level, base.eax.split.level, 0);
-show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
-
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
-                         unsigned int cpu)
-{
-        return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
-}
-
-static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
-                                        int type, char *buf)
-{
-        const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
-        int ret;
-
-        if (type)
-                ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
-                                cpumask_pr_args(mask));
-        else
-                ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb",
-                                cpumask_pr_args(mask));
-        buf[ret++] = '\n';
-        buf[ret] = '\0';
-        return ret;
-}
-
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
-                                          unsigned int cpu)
+static int __init_cache_level(unsigned int cpu)
 {
-        return show_shared_cpu_map_func(leaf, 0, buf);
-}
-
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
-                                           unsigned int cpu)
-{
-        return show_shared_cpu_map_func(leaf, 1, buf);
-}
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
-                         unsigned int cpu)
-{
-        switch (this_leaf->base.eax.split.type) {
-        case CACHE_TYPE_DATA:
-                return sprintf(buf, "Data\n");
-        case CACHE_TYPE_INST:
-                return sprintf(buf, "Instruction\n");
-        case CACHE_TYPE_UNIFIED:
-                return sprintf(buf, "Unified\n");
-        default:
-                return sprintf(buf, "Unknown\n");
-        }
-}
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
-#define define_one_ro(_name) \
-static struct _cache_attr _name = \
-        __ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(physical_line_partition);
-define_one_ro(ways_of_associativity);
-define_one_ro(number_of_sets);
-define_one_ro(size);
-define_one_ro(shared_cpu_map);
-define_one_ro(shared_cpu_list);
-
-static struct attribute *default_attrs[] = {
-        &type.attr,
-        &level.attr,
-        &coherency_line_size.attr,
-        &physical_line_partition.attr,
-        &ways_of_associativity.attr,
-        &number_of_sets.attr,
-        &size.attr,
-        &shared_cpu_map.attr,
-        &shared_cpu_list.attr,
-        NULL
-};
-
-#ifdef CONFIG_AMD_NB
-static struct attribute **amd_l3_attrs(void)
-{
-        static struct attribute **attrs;
-        int n;
-
-        if (attrs)
-                return attrs;
-
-        n = ARRAY_SIZE(default_attrs);
-
-        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-                n += 2;
-
-        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-                n += 1;
-
-        attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
-        if (attrs == NULL)
-                return attrs = default_attrs;
-
-        for (n = 0; default_attrs[n]; n++)
-                attrs[n] = default_attrs[n];
-
-        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
-                attrs[n++] = &cache_disable_0.attr;
-                attrs[n++] = &cache_disable_1.attr;
-        }
-
-        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-                attrs[n++] = &subcaches.attr;
-
-        return attrs;
-}
-#endif
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-        struct _cache_attr *fattr = to_attr(attr);
-        struct _index_kobject *this_leaf = to_object(kobj);
-        ssize_t ret;
-
-        ret = fattr->show ?
-                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-                        buf, this_leaf->cpu) :
-                0;
-        return ret;
-}
-
-static ssize_t store(struct kobject *kobj, struct attribute *attr,
-                     const char *buf, size_t count)
-{
-        struct _cache_attr *fattr = to_attr(attr);
-        struct _index_kobject *this_leaf = to_object(kobj);
-        ssize_t ret;
-
-        ret = fattr->store ?
-                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-                        buf, count, this_leaf->cpu) :
-                0;
-        return ret;
-}
-
-static const struct sysfs_ops sysfs_ops = {
-        .show = show,
-        .store = store,
-};
-
-static struct kobj_type ktype_cache = {
-        .sysfs_ops = &sysfs_ops,
-        .default_attrs = default_attrs,
-};
-
-static struct kobj_type ktype_percpu_entry = {
-        .sysfs_ops = &sysfs_ops,
-};
-
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
-{
-        kfree(per_cpu(ici_cache_kobject, cpu));
-        kfree(per_cpu(ici_index_kobject, cpu));
-        per_cpu(ici_cache_kobject, cpu) = NULL;
-        per_cpu(ici_index_kobject, cpu) = NULL;
-        free_cache_attributes(cpu);
-}
-
-static int cpuid4_cache_sysfs_init(unsigned int cpu)
-{
-        int err;
-
-        if (num_cache_leaves == 0)
+        if (!num_cache_leaves)
                 return -ENOENT;
-
-        err = detect_cache_attributes(cpu);
-        if (err)
-                return err;
-
-        /* Allocate all required memory */
-        per_cpu(ici_cache_kobject, cpu) =
-                kzalloc(sizeof(struct kobject), GFP_KERNEL);
-        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
-                goto err_out;
-
-        per_cpu(ici_index_kobject, cpu) = kzalloc(
-            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
-                goto err_out;
-
+        if (!this_cpu_ci)
+                return -EINVAL;
+        this_cpu_ci->num_levels = 3;
+        this_cpu_ci->num_leaves = num_cache_leaves;
         return 0;
-
-err_out:
-        cpuid4_cache_sysfs_exit(cpu);
-        return -ENOMEM;
 }

-static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
-
-/* Add/Remove cache interface for CPU device */
-static int cache_add_dev(struct device *dev)
+static int __populate_cache_leaves(unsigned int cpu)
 {
-        unsigned int cpu = dev->id;
-        unsigned long i, j;
-        struct _index_kobject *this_object;
-        struct _cpuid4_info *this_leaf;
-        int retval;
-
-        retval = cpuid4_cache_sysfs_init(cpu);
-        if (unlikely(retval < 0))
-                return retval;
-
-        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
-                                      &ktype_percpu_entry,
-                                      &dev->kobj, "%s", "cache");
-        if (retval < 0) {
-                cpuid4_cache_sysfs_exit(cpu);
-                return retval;
-        }
+        unsigned int idx, ret;
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+        struct _cpuid4_info_regs id4_regs = {};

-        for (i = 0; i < num_cache_leaves; i++) {
-                this_object = INDEX_KOBJECT_PTR(cpu, i);
-                this_object->cpu = cpu;
-                this_object->index = i;
-
-                this_leaf = CPUID4_INFO_IDX(cpu, i);
-
-                ktype_cache.default_attrs = default_attrs;
-#ifdef CONFIG_AMD_NB
-                if (this_leaf->base.nb)
-                        ktype_cache.default_attrs = amd_l3_attrs();
-#endif
-                retval = kobject_init_and_add(&(this_object->kobj),
-                                              &ktype_cache,
-                                              per_cpu(ici_cache_kobject, cpu),
-                                              "index%1lu", i);
-                if (unlikely(retval)) {
-                        for (j = 0; j < i; j++)
-                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-                        kobject_put(per_cpu(ici_cache_kobject, cpu));
-                        cpuid4_cache_sysfs_exit(cpu);
-                        return retval;
-                }
-                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
+        for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+                ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
+                if (ret)
+                        return ret;
+                ci_leaf_init(this_leaf++, &id4_regs);
+                __cache_cpumap_setup(cpu, idx, &id4_regs);
         }
-        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
-
-        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
         return 0;
 }

-static void cache_remove_dev(struct device *dev)
-{
-        unsigned int cpu = dev->id;
-        unsigned long i;
-
-        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
-                return;
-        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
-                return;
-        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
-
-        for (i = 0; i < num_cache_leaves; i++)
-                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-        kobject_put(per_cpu(ici_cache_kobject, cpu));
-        cpuid4_cache_sysfs_exit(cpu);
-}
-
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
-                                  unsigned long action, void *hcpu)
-{
-        unsigned int cpu = (unsigned long)hcpu;
-        struct device *dev;
-
-        dev = get_cpu_device(cpu);
-        switch (action) {
-        case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-                cache_add_dev(dev);
-                break;
-        case CPU_DEAD:
-        case CPU_DEAD_FROZEN:
-                cache_remove_dev(dev);
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block cacheinfo_cpu_notifier = {
-        .notifier_call = cacheinfo_cpu_callback,
-};
-
-static int __init cache_sysfs_init(void)
-{
-        int i, err = 0;
-
-        if (num_cache_leaves == 0)
-                return 0;
-
-        cpu_notifier_register_begin();
-        for_each_online_cpu(i) {
-                struct device *dev = get_cpu_device(i);
-
-                err = cache_add_dev(dev);
-                if (err)
-                        goto out;
-        }
-        __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
-out:
-        cpu_notifier_register_done();
-        return err;
-}
-
-device_initcall(cache_sysfs_init);
-
-#endif
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)