-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	70
1 file changed, 40 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1b889860eb73..2e8b323b34e4 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -129,7 +129,7 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	cpumask_t shared_cpu_map;
+	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
 unsigned short			num_cache_leaves;
@@ -451,8 +451,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 }
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static struct _cpuid4_info *cpuid4_info[NR_CPUS];
-#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
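
The hunk above is the core pattern the whole patch applies: a static NR_CPUS-sized
pointer array becomes a per-CPU variable, so storage scales with the CPUs that can
actually exist rather than the compile-time maximum. A minimal, self-contained
sketch of that conversion follows; the "foo" names are invented for illustration,
while DEFINE_PER_CPU(), per_cpu(), kzalloc(), and kfree() are the real kernel API
(from <linux/percpu.h> and <linux/slab.h>).

	/* Hypothetical example of the NR_CPUS-array to per-CPU conversion. */
	struct foo {
		int data;
	};

	/* Before: static struct foo *foo_ptr[NR_CPUS]; */
	static DEFINE_PER_CPU(struct foo *, foo_ptr);

	static int foo_alloc(unsigned int cpu)
	{
		/* per_cpu() yields an lvalue, so it assigns like an array slot. */
		per_cpu(foo_ptr, cpu) = kzalloc(sizeof(struct foo), GFP_KERNEL);
		if (per_cpu(foo_ptr, cpu) == NULL)
			return -ENOMEM;
		return 0;
	}

	static void foo_free(unsigned int cpu)
	{
		kfree(per_cpu(foo_ptr, cpu));
		per_cpu(foo_ptr, cpu) = NULL;	/* safe to retry allocation later */
	}
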
@@ -474,7 +474,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
 				cpu_set(i, this_leaf->shared_cpu_map);
-				if (i != cpu && cpuid4_info[i]) {
+				if (i != cpu && per_cpu(cpuid4_info, i)) {
 					sibling_leaf = CPUID4_INFO_IDX(i, index);
 					cpu_set(cpu, sibling_leaf->shared_cpu_map);
 				}
@@ -505,8 +505,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(cpuid4_info[cpu]);
-	cpuid4_info[cpu] = NULL;
+	kfree(per_cpu(cpuid4_info, cpu));
+	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
@@ -519,9 +519,9 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	cpuid4_info[cpu] = kzalloc(
+	per_cpu(cpuid4_info, cpu) = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
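
The context line "oldmask = current->cpus_allowed" hints at how
detect_cache_attributes() obtains CPUID data for another CPU in this era: it
temporarily rebinds the calling task to the target CPU, probes there, then
restores the original affinity. A hypothetical reduced form of that idiom is
sketched below; do_cpuid_probe() is an invented stand-in, while
set_cpus_allowed(), cpumask_of_cpu(), and current (from <linux/sched.h>) are
the period API.

	/* Invented stand-in for the per-leaf CPUID queries. */
	static int do_cpuid_probe(unsigned int cpu)
	{
		return 0;
	}

	static int probe_on_cpu(unsigned int cpu)
	{
		cpumask_t oldmask = current->cpus_allowed;
		int retval;

		/* Migrate ourselves to the target CPU... */
		retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
		if (retval)
			return retval;

		/* ...so the CPUID reads execute on that CPU. */
		retval = do_cpuid_probe(cpu);

		/* Restore the caller's original affinity mask. */
		set_cpus_allowed(current, oldmask);
		return retval;
	}
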
@@ -546,8 +546,8 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 
 out:
 	if (retval) {
-		kfree(cpuid4_info[cpu]);
-		cpuid4_info[cpu] = NULL;
+		kfree(per_cpu(cpuid4_info, cpu));
+		per_cpu(cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -561,7 +561,7 @@ out:
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static struct kobject * cache_kobject[NR_CPUS];
+static DEFINE_PER_CPU(struct kobject *, cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -570,8 +570,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static struct _index_kobject *index_kobject[NR_CPUS];
-#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
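
Both lookup macros in this patch (CPUID4_INFO_IDX earlier and INDEX_KOBJECT_PTR
here) share one shape: the per-CPU slot holds a pointer to a kzalloc'ed array
with one element per cache leaf, and the macro returns the address of element y.
For instance, INDEX_KOBJECT_PTR(cpu, 2) expands to
&((per_cpu(index_kobject, cpu))[2]), the third _index_kobject in that CPU's
array. The conversion only changes how the base pointer is fetched; the
indexing itself is untouched.
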
@@ -593,9 +593,16 @@ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 
 static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
 {
-	char mask_str[NR_CPUS];
-	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
-	return sprintf(buf, "%s\n", mask_str);
+	int n = 0;
+	int len = cpumask_scnprintf_len(nr_cpu_ids);
+	char *mask_str = kmalloc(len, GFP_KERNEL);
+
+	if (mask_str) {
+		cpumask_scnprintf(mask_str, len, this_leaf->shared_cpu_map);
+		n = sprintf(buf, "%s\n", mask_str);
+		kfree(mask_str);
+	}
+	return n;
 }
 
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
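
The show_shared_cpu_map() rewrite is about stack usage as much as per-CPU data:
with CONFIG_NR_CPUS=4096, the old on-stack char mask_str[NR_CPUS] consumed 4 KB
of kernel stack on every read of the sysfs file. The new code sizes the buffer
from nr_cpu_ids (the CPUs actually possible) via cpumask_scnprintf_len() and
keeps it on the heap. A hypothetical standalone helper in the same shape,
assuming the same era cpumask API (cpumask_t passed by value):

	static ssize_t format_cpumask(cpumask_t mask, char *buf)
	{
		ssize_t n = 0;
		int len = cpumask_scnprintf_len(nr_cpu_ids);
		char *str = kmalloc(len, GFP_KERNEL);

		if (str) {
			cpumask_scnprintf(str, len, mask);
			n = sprintf(buf, "%s\n", str);
			kfree(str);
		}
		return n;	/* 0 on allocation failure, as above */
	}
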
@@ -684,10 +691,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-	kfree(cache_kobject[cpu]);
-	kfree(index_kobject[cpu]);
-	cache_kobject[cpu] = NULL;
-	index_kobject[cpu] = NULL;
+	kfree(per_cpu(cache_kobject, cpu));
+	kfree(per_cpu(index_kobject, cpu));
+	per_cpu(cache_kobject, cpu) = NULL;
+	per_cpu(index_kobject, cpu) = NULL;
 	free_cache_attributes(cpu);
 }
 
@@ -703,13 +710,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		return err;
 
 	/* Allocate all required memory */
-	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
-	if (unlikely(cache_kobject[cpu] == NULL))
+	per_cpu(cache_kobject, cpu) =
+		kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
 		goto err_out;
 
-	index_kobject[cpu] = kzalloc(
+	per_cpu(index_kobject, cpu) = kzalloc(
 	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(index_kobject[cpu] == NULL))
+	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
 	return 0;
@@ -733,7 +741,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (unlikely(retval < 0))
 		return retval;
 
-	retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
+	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+				      &ktype_percpu_entry,
 				      &sys_dev->kobj, "%s", "cache");
 	if (retval < 0) {
 		cpuid4_cache_sysfs_exit(cpu);
@@ -745,13 +754,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
-					      &ktype_cache, cache_kobject[cpu],
+					      &ktype_cache,
+					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
 			for (j = 0; j < i; j++) {
 				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
 			}
-			kobject_put(cache_kobject[cpu]);
+			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			break;
 		}
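
The error path above observes the kobject lifetime rule: once
kobject_init_and_add() has been called, the object holds a reference that must
be dropped with kobject_put(), which invokes the ktype's release hook, rather
than being freed directly. A hypothetical minimal example of the rule follows;
my_ktype and my_release are invented names, and <linux/kobject.h> plus
<linux/slab.h> are assumed.

	static void my_release(struct kobject *kobj)
	{
		kfree(kobj);
	}

	static struct kobj_type my_ktype = {
		.release = my_release,
	};

	static int add_child(struct kobject *parent)
	{
		struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
		int ret;

		if (!kobj)
			return -ENOMEM;

		ret = kobject_init_and_add(kobj, &my_ktype, parent, "%s", "child");
		if (ret)
			kobject_put(kobj);	/* frees via my_release(), never bare kfree() */
		return ret;
	}
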
@@ -760,7 +770,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (!retval)
 		cpu_set(cpu, cache_dev_map);
 
-	kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
+	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return retval;
 }
 
@@ -769,7 +779,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
 	if (!cpu_isset(cpu, cache_dev_map))
 		return;
@@ -777,7 +787,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-	kobject_put(cache_kobject[cpu]);
+	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
 