author     Thomas Gleixner <tglx@linutronix.de>  2009-03-23 09:50:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2009-03-23 16:20:20 -0400
commit     80c5520811d3805adcb15c570ea5e2d489fa5d0b (patch)
tree       ae797a7f4af39f80e77526533d06ac23b439f0ab /arch/x86/kernel/cpu/intel_cacheinfo.c
parent     b3e3b302cf6dc8d60b67f0e84d1fa5648889c038 (diff)
parent     8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
Merge branch 'cpus4096' into irq/threaded
Conflicts:
        arch/parisc/kernel/irq.c
        kernel/irq/handle.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index da299eb85fc..8e6ce2c146d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -147,10 +147,19 @@ struct _cpuid4_info {
         union _cpuid4_leaf_ecx ecx;
         unsigned long size;
         unsigned long can_disable;
-        cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
+        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
-#ifdef CONFIG_PCI
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+        union _cpuid4_leaf_eax eax;
+        union _cpuid4_leaf_ebx ebx;
+        union _cpuid4_leaf_ecx ecx;
+        unsigned long size;
+        unsigned long can_disable;
+};
+
+#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
 static struct pci_device_id k8_nb_id[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
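The key change in the hunk above comes from the cpus4096 work: the embedded `cpumask_t` field becomes a raw `DECLARE_BITMAP`, and every user below goes through `to_cpumask()`, which reinterprets the `unsigned long` array as a `struct cpumask *` without copying. A minimal userspace sketch of that pattern, using simplified stand-ins for the kernel macros (the real definitions live in `<linux/types.h>` and `<linux/cpumask.h>`):

```c
#include <limits.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros; illustrative only. */
#define NR_CPUS			4096
#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); };

/* to_cpumask(): a type-level reinterpretation, no data is moved. */
#define to_cpumask(bitmap) ((struct cpumask *)(bitmap))

static void cpumask_set_cpu(int cpu, struct cpumask *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpumask_test_cpu(int cpu, const struct cpumask *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

int main(void)
{
	/* Same shape as the converted shared_cpu_map field. */
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS) = { 0 };

	cpumask_set_cpu(3, to_cpumask(shared_cpu_map));
	printf("cpu 3 set: %d\n",
	       cpumask_test_cpu(3, to_cpumask(shared_cpu_map)));
	return 0;
}
```

Compiled with `cc -std=c11`, this prints `cpu 3 set: 1`; the point is that the bitmap and the `struct cpumask` it is cast to have identical layout, so `to_cpumask()` costs nothing.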
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
         if (index < 3)
                 return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+                                   struct _cpuid4_info_regs *this_leaf)
 {
         union _cpuid4_leaf_eax eax;
         union _cpuid4_leaf_ebx ebx;
@@ -353,11 +363,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                  * parameters cpuid leaf to find the cache details
                  */
                 for (i = 0; i < num_cache_leaves; i++) {
-                        struct _cpuid4_info this_leaf;
-
+                        struct _cpuid4_info_regs this_leaf;
                         int retval;
 
-                        retval = cpuid4_cache_lookup(i, &this_leaf);
+                        retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                         if (retval >= 0) {
                                 switch(this_leaf.eax.split.level) {
                                 case 1:
@@ -490,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
         return l2;
 }
 
+#ifdef CONFIG_SYSFS
+
 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
@@ -506,17 +517,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
         num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
         if (num_threads_sharing == 1)
-                cpu_set(cpu, this_leaf->shared_cpu_map);
+                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
         else {
                 index_msb = get_count_order(num_threads_sharing);
 
                 for_each_online_cpu(i) {
                         if (cpu_data(i).apicid >> index_msb ==
                             c->apicid >> index_msb) {
-                                cpu_set(i, this_leaf->shared_cpu_map);
+                                cpumask_set_cpu(i,
+                                        to_cpumask(this_leaf->shared_cpu_map));
                                 if (i != cpu && per_cpu(cpuid4_info, i)) {
-                                        sibling_leaf = CPUID4_INFO_IDX(i, index);
-                                        cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                                        sibling_leaf =
+                                                CPUID4_INFO_IDX(i, index);
+                                        cpumask_set_cpu(cpu, to_cpumask(
+                                                sibling_leaf->shared_cpu_map));
                                 }
                         }
                 }
@@ -528,9 +542,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
         int sibling;
 
         this_leaf = CPUID4_INFO_IDX(cpu, index);
-        for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+                cpumask_clear_cpu(cpu,
+                                  to_cpumask(sibling_leaf->shared_cpu_map));
         }
 }
 #else
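The two hunks above show the whole accessor migration in miniature, and the same mechanical mapping repeats through the rest of the file: `cpu_set()` becomes `cpumask_set_cpu()`, `cpu_clear()` becomes `cpumask_clear_cpu()`, `cpu_isset()` becomes `cpumask_test_cpu()`, `first_cpu()` becomes `cpumask_first()`, and `for_each_cpu_mask_nr()` becomes `for_each_cpu()`. The new accessors operate on `struct cpumask` pointers rather than `cpumask_t` lvalues, which is why each raw bitmap is wrapped in `to_cpumask()` at the call site.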
@@ -549,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
         per_cpu(cpuid4_info, cpu) = NULL;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+        struct _cpuid4_info_regs *leaf_regs =
+                (struct _cpuid4_info_regs *)this_leaf;
+
+        return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
         int j, *retval = _retval, cpu = smp_processor_id();
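The wrapper added in the hunk above relies on struct-prefix layout: `_cpuid4_info_regs` duplicates the leading members of `_cpuid4_info` (everything except `shared_cpu_map`, per the comment in the first hunk), so a `_cpuid4_info *` can be cast down and handed to `cpuid4_cache_lookup_regs()`, which only touches the common prefix. A small self-contained sketch of the same trick, with hypothetical stand-in structs (the field types are simplified; the real ones are the `_cpuid4_leaf_*` unions):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct info_regs {              /* prefix: register-derived fields only */
	unsigned int eax, ebx, ecx;
	unsigned long size;
};

struct info {                   /* full struct: same prefix + extra state */
	unsigned int eax, ebx, ecx;
	unsigned long size;
	unsigned long shared_cpu_map[64];
};

/* The _regs helper writes only the common prefix. */
static void lookup_regs(struct info_regs *leaf)
{
	leaf->eax  = 0x1c004143;        /* made-up CPUID-style value */
	leaf->size = 512 * 1024;
}

/* Same trick as the kernel wrapper: reuse the prefix helper via a cast. */
static void lookup(struct info *leaf)
{
	lookup_regs((struct info_regs *)leaf);
}

int main(void)
{
	struct info leaf;

	/* The cast is only sound while the layouts stay in sync. */
	static_assert(offsetof(struct info, size) ==
		      offsetof(struct info_regs, size), "prefix mismatch");

	memset(&leaf, 0, sizeof(leaf));
	lookup(&leaf);
	printf("size = %lu\n", leaf.size);
	return 0;
}
```

The `static_assert` is this sketch's addition; the kernel code has no such check and simply keeps the two declarations in sync by hand.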
@@ -590,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         return retval;
 }
 
-#ifdef CONFIG_SYSFS
-
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
 
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
         int n = 0;
 
         if (len > 1) {
-                cpumask_t *mask = &this_leaf->shared_cpu_map;
+                const struct cpumask *mask;
 
+                mask = to_cpumask(this_leaf->shared_cpu_map);
                 n = type?
                         cpulist_scnprintf(buf, len-2, mask) :
                         cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-        int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+        const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+        int node = cpu_to_node(cpumask_first(mask));
         struct pci_dev *dev = NULL;
         ssize_t ret = 0;
         int i;
@@ -733,7 +757,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
                     size_t count)
 {
-        int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+        const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+        int node = cpu_to_node(cpumask_first(mask));
         struct pci_dev *dev = NULL;
         unsigned int ret, index, val;
 
@@ -878,7 +903,7 @@ err_out:
         return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                 }
                 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
         }
-        cpu_set(cpu, cache_dev_map);
+        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
         kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
         return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
         if (per_cpu(cpuid4_info, cpu) == NULL)
                 return;
-        if (!cpu_isset(cpu, cache_dev_map))
+        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                 return;
-        cpu_clear(cpu, cache_dev_map);
+        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
         for (i = 0; i < num_cache_leaves; i++)
                 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
