author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-01-30 07:30:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:55 -0500
commit	53756d3722172815f52272b28c6d5d5e9639adde (patch)
tree	f376d3378a47c9d442d8444ce7fa78b557bcd8b5 /arch/x86/kernel/setup_64.c
parent	5548fecdff5617ba3a2f09f0e585e1ac6e1bd25c (diff)
x86: add set/clear_cpu_cap operations
The patch to suppress bitops-related warnings added a pile of ugly casts. Many of these were related to the management of x86 CPU capabilities. Clean these up by adding specific set/clear_cpu_cap macros, and use them consistently.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
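For reference, the set_cpu_cap()/clear_cpu_cap() helpers that the hunks below switch to are thin wrappers around set_bit()/clear_bit() which keep the (unsigned long *) cast in one place. Their definitions live in the x86 cpufeature header, outside this file, and are not part of this diff view; a minimal sketch of what they look like, assuming plain macro wrappers:

    /* Sketch only -- the real definitions sit alongside the X86_FEATURE_* flags,
     * not in setup_64.c.  The macros hide the cast that every open-coded
     * set_bit()/clear_bit() call on x86_capability otherwise has to repeat. */
    #define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
    #define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))

Callers then write set_cpu_cap(c, X86_FEATURE_REP_GOOD) instead of spelling out the cast at every site, which is the whole of the cleanup in the hunks below.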
Diffstat (limited to 'arch/x86/kernel/setup_64.c')
-rw-r--r--	arch/x86/kernel/setup_64.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 63dd39b843b5..ce4d6b52ce36 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -667,13 +667,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	level = cpuid_eax(1);
 	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
 			     level >= 0x0f58))
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	level = get_model_name(c);
 	if (!level) {
@@ -689,7 +689,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
 	if (c->x86_power & (1<<8))
-		set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
 	/* Multi core CPU? */
 	if (c->extended_cpuid_level >= 0x80000008)
@@ -702,14 +702,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 
 	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-		set_bit(X86_FEATURE_K8, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 
 	/* RDTSC can be speculated around */
-	clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+	clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 
 	/* Family 10 doesn't support C states in MWAIT so don't use it */
 	if (c->x86 == 0x10 && !force_mwait)
-		clear_bit(X86_FEATURE_MWAIT, (unsigned long *)&c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_MWAIT);
 
 	if (amd_apic_timer_broken())
 		disable_apic_timer = 1;
@@ -811,17 +811,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		unsigned eax = cpuid_eax(10);
 		/* Check for version and the number of counters */
 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_bit(X86_FEATURE_ARCH_PERFMON,
-				(unsigned long *)&c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
 	if (cpu_has_ds) {
 		unsigned int l1, l2;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
-			set_bit(X86_FEATURE_BTS, (unsigned long *)c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_BTS);
 		if (!(l1 & (1<<12)))
-			set_bit(X86_FEATURE_PEBS, (unsigned long *)c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
 	n = c->extended_cpuid_level;
@@ -840,13 +839,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	if (c->x86 == 6)
-		set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	if (c->x86 == 15)
-		set_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 	else
-		clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
 	c->x86_max_cores = intel_num_cpu_cores(c);
 
 	srat_detect_node();