author:    Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400
committer: Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400
commit:    ee3e542fec6e69bc9fb668698889a37d93950ddf
tree:      e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/x86/kernel
parent:    fe2a801b50c0bb8039d627e5ae1fec249d10ff39
parent:    f1d6e17f540af37bb1891480143669ba7636c4cf
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'arch/x86/kernel')
94 files changed, 2685 insertions, 1001 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 7bd3bd310106..88d99ea77723 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,8 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
+CFLAGS_irq.o := -I$(src)/../include/asm/trace
+
 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
@@ -67,7 +69,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
 obj-y += kprobes/
 obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
+obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_VM86) += vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o
 microcode-y := microcode_core.o
 microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
 microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
+obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o
 obj-$(CONFIG_MICROCODE) += microcode.o
 
 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
@@ -102,6 +105,7 @@ obj-$(CONFIG_OF) += devicetree.o
 obj-$(CONFIG_UPROBES) += uprobes.o
 
 obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+obj-$(CONFIG_TRACING) += tracepoint.o
 
 ###
 # 64 bit specific files
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 230c8ea878e5..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -44,6 +44,7 @@
 #include <asm/mpspec.h>
 #include <asm/smp.h>
 
+#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
 static int __initdata acpi_force = 0;
 u32 acpi_rsdt_forced;
 int acpi_disabled;
@@ -194,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
         return 0;
 }
 
-static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+static void acpi_register_lapic(int id, u8 enabled)
 {
         unsigned int ver = 0;
 
@@ -559,6 +560,12 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
 int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
                            int trigger, int polarity) = acpi_register_gsi_pic;
 
+#ifdef CONFIG_ACPI_SLEEP
+int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
+#else
+int (*acpi_suspend_lowlevel)(void);
+#endif
+
 /*
  * success: return IRQ number (>=0)
  * failure: return < 0
@@ -600,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
         int nid;
@@ -613,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
         union acpi_object *obj;
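Note: the acpi_suspend_lowlevel hunk above converts a direct call into an overridable hook. The ACPI core now calls through a function pointer that defaults to the native x86_acpi_suspend_lowlevel() when CONFIG_ACPI_SLEEP is set, so a platform layer can install a different suspend path at boot. A minimal userspace sketch of the pattern follows; every name in it is invented for illustration, it is not the kernel code:

    #include <stdio.h>

    /* Default implementation, analogous to x86_acpi_suspend_lowlevel(). */
    static int native_suspend(void)
    {
            puts("native suspend path");
            return 0;
    }

    /* The hook: callers always go through the pointer, never the symbol. */
    static int (*suspend_lowlevel)(void) = native_suspend;

    /* A hypothetical platform override installed during early init. */
    static int alt_suspend(void)
    {
            puts("alternate suspend path");
            return 0;
    }

    int main(void)
    {
            suspend_lowlevel();                /* default path */
            suspend_lowlevel = alt_suspend;    /* platform swaps in its own */
            suspend_lowlevel();                /* overridden path */
            return 0;
    }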
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index b44577bc9744..33120100ff5e 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,12 +26,12 @@ static char temp_stack[4096];
 #endif
 
 /**
- * acpi_suspend_lowlevel - save kernel state
+ * x86_acpi_suspend_lowlevel - save kernel state
  *
  * Create an identity mapped page table and copy the wakeup routine to
  * low memory.
  */
-int acpi_suspend_lowlevel(void)
+int x86_acpi_suspend_lowlevel(void)
 {
         struct wakeup_header *header =
                 (struct wakeup_header *) __va(real_mode_header->wakeup_header);
@@ -48,9 +48,20 @@ int acpi_suspend_lowlevel(void)
 #ifndef CONFIG_64BIT
         native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
+        /*
+         * We have to check that we can write back the value, and not
+         * just read it.  At least on 90 nm Pentium M (Family 6, Model
+         * 13), reading an invalid MSR is not guaranteed to trap, see
+         * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+         * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+         * nm process with 512-KB L2 Cache Specification Update".
+         */
         if (!rdmsr_safe(MSR_EFER,
                         &header->pmode_efer_low,
-                        &header->pmode_efer_high))
+                        &header->pmode_efer_high) &&
+            !wrmsr_safe(MSR_EFER,
+                        header->pmode_efer_low,
+                        header->pmode_efer_high))
                 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
@@ -61,7 +72,10 @@ int acpi_suspend_lowlevel(void)
         }
         if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
                         &header->pmode_misc_en_low,
-                        &header->pmode_misc_en_high))
+                        &header->pmode_misc_en_high) &&
+            !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+                        header->pmode_misc_en_low,
+                        header->pmode_misc_en_high))
                 header->pmode_behavior |=
                         (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
         header->realmode_flags = acpi_realmode_flags;
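Note: the pattern above only flags an MSR for restore-on-resume after a full read *and* write-back round trip succeeds, because on the cited Pentium M stepping a read of an invalid MSR is not guaranteed to trap. A self-contained sketch of that probe logic, with the real rdmsr_safe()/wrmsr_safe() replaced by simulated stand-ins (the MSR numbers and behavior are made up for the demo):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Stand-ins for rdmsr_safe()/wrmsr_safe(): fallible accessors that
     * return 0 on success and nonzero if the access would have faulted.
     * MSR 0x2a is simulated as one that reads "successfully" but traps
     * on write-back, mimicking the erratum.
     */
    static int rdmsr_sim(uint32_t msr, uint32_t *lo, uint32_t *hi)
    {
            (void)msr;
            *lo = 0x1234;
            *hi = 0x0;
            return 0;                        /* read appears to succeed */
    }

    static int wrmsr_sim(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            (void)lo; (void)hi;
            return msr == 0x2a ? -1 : 0;     /* write-back traps */
    }

    /* Flag the MSR for restore only if the full round trip works. */
    static bool msr_round_trips(uint32_t msr)
    {
            uint32_t lo, hi;

            return !rdmsr_sim(msr, &lo, &hi) && !wrmsr_sim(msr, lo, hi);
    }

    int main(void)
    {
            printf("MSR 0x10: %s\n", msr_round_trips(0x10) ? "restore" : "skip");
            printf("MSR 0x2a: %s\n", msr_round_trips(0x2a) ? "restore" : "skip");
            return 0;
    }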
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 67f59f8c6956..c9c2c982d5e4 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -15,3 +15,5 @@ extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
 
 extern void do_suspend_lowlevel(void);
+
+extern int x86_acpi_suspend_lowlevel(void);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 904611bf0e5a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -35,6 +35,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
+#include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
@@ -57,7 +58,7 @@
 
 unsigned int num_processors;
 
-unsigned disabled_cpus __cpuinitdata;
+unsigned disabled_cpus;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
@@ -543,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __cpuinit setup_APIC_timer(void)
+static void setup_APIC_timer(void)
 {
         struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
@@ -865,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
         setup_APIC_timer();
 }
 
-void __cpuinit setup_secondary_APIC_clock(void)
+void setup_secondary_APIC_clock(void)
 {
         setup_APIC_timer();
 }
@@ -919,17 +920,35 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
         /*
          * NOTE! We'd better ACK the irq immediately,
          * because timer handling can be slow.
+         *
+         * update_process_times() expects us to have done irq_enter().
+         * Besides, if we don't timer interrupts ignore the global
+         * interrupt lock, which is the WrongThing (tm) to do.
          */
-        ack_APIC_irq();
+        entering_ack_irq();
+        local_apic_timer_interrupt();
+        exiting_irq();
+
+        set_irq_regs(old_regs);
+}
+
+void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
+{
+        struct pt_regs *old_regs = set_irq_regs(regs);
+
         /*
+         * NOTE! We'd better ACK the irq immediately,
+         * because timer handling can be slow.
+         *
          * update_process_times() expects us to have done irq_enter().
          * Besides, if we don't timer interrupts ignore the global
          * interrupt lock, which is the WrongThing (tm) to do.
          */
-        irq_enter();
-        exit_idle();
+        entering_ack_irq();
+        trace_local_timer_entry(LOCAL_TIMER_VECTOR);
         local_apic_timer_interrupt();
-        irq_exit();
+        trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+        exiting_irq();
 
         set_irq_regs(old_regs);
 }
@@ -1210,7 +1229,7 @@ void __init init_bsp_APIC(void)
         apic_write(APIC_LVT1, value);
 }
 
-static void __cpuinit lapic_setup_esr(void)
+static void lapic_setup_esr(void)
 {
         unsigned int oldvalue, value, maxlvt;
 
@@ -1257,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
  * Used to setup local APIC while initializing BSP or bringin up APs.
  * Always called with preemption disabled.
  */
-void __cpuinit setup_local_APIC(void)
+void setup_local_APIC(void)
 {
         int cpu = smp_processor_id();
         unsigned int value, queued;
@@ -1452,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
 #endif
 }
 
-void __cpuinit end_local_APIC_setup(void)
+void end_local_APIC_setup(void)
 {
         lapic_setup_esr();
 
@@ -1907,12 +1926,10 @@ int __init APIC_init_uniprocessor(void)
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-void smp_spurious_interrupt(struct pt_regs *regs)
+static inline void __smp_spurious_interrupt(void)
 {
         u32 v;
 
-        irq_enter();
-        exit_idle();
         /*
          * Check if this really is a spurious interrupt and ACK it
          * if it is a vectored one. Just in case...
@@ -1927,13 +1944,28 @@ void smp_spurious_interrupt(struct pt_regs *regs)
         /* see sw-dev-man vol 3, chapter 7.4.13.5 */
         pr_info("spurious APIC interrupt on CPU#%d, "
                 "should never happen.\n", smp_processor_id());
-        irq_exit();
+}
+
+void smp_spurious_interrupt(struct pt_regs *regs)
+{
+        entering_irq();
+        __smp_spurious_interrupt();
+        exiting_irq();
+}
+
+void smp_trace_spurious_interrupt(struct pt_regs *regs)
+{
+        entering_irq();
+        trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
+        __smp_spurious_interrupt();
+        trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
+        exiting_irq();
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-void smp_error_interrupt(struct pt_regs *regs)
+static inline void __smp_error_interrupt(struct pt_regs *regs)
 {
         u32 v0, v1;
         u32 i = 0;
@@ -1948,8 +1980,6 @@ void smp_error_interrupt(struct pt_regs *regs)
                 "Illegal register address", /* APIC Error Bit 7 */
         };
 
-        irq_enter();
-        exit_idle();
         /* First tickle the hardware, only then report what went on. -- REW */
         v0 = apic_read(APIC_ESR);
         apic_write(APIC_ESR, 0);
@@ -1970,7 +2000,22 @@
 
         apic_printk(APIC_DEBUG, KERN_CONT "\n");
 
-        irq_exit();
+}
+
+void smp_error_interrupt(struct pt_regs *regs)
+{
+        entering_irq();
+        __smp_error_interrupt(regs);
+        exiting_irq();
+}
+
+void smp_trace_error_interrupt(struct pt_regs *regs)
+{
+        entering_irq();
+        trace_error_apic_entry(ERROR_APIC_VECTOR);
+        __smp_error_interrupt(regs);
+        trace_error_apic_exit(ERROR_APIC_VECTOR);
+        exiting_irq();
 }
 
 /**
@@ -2062,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
         apic_write(APIC_LVT1, value);
 }
 
-void __cpuinit generic_processor_info(int apicid, int version)
+void generic_processor_info(int apicid, int version)
 {
         int cpu, max = nr_cpu_ids;
         bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2302,7 +2347,7 @@ static void lapic_resume(void)
         apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
         apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
         apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+#if defined(CONFIG_X86_MCE_INTEL)
         if (maxlvt >= 5)
                 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
 #endif
@@ -2332,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
         .suspend = lapic_suspend,
 };
 
-static void __cpuinit apic_pm_activate(void)
+static void apic_pm_activate(void)
 {
         apic_pm_state.active = 1;
 }
@@ -2357,7 +2402,7 @@ static void apic_pm_activate(void) { }
 
 #ifdef CONFIG_X86_64
 
-static int __cpuinit apic_cluster_num(void)
+static int apic_cluster_num(void)
 {
         int i, clusters, zeros;
         unsigned id;
@@ -2402,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
         return clusters;
 }
 
-static int __cpuinitdata multi_checked;
-static int __cpuinitdata multi;
+static int multi_checked;
+static int multi;
 
-static int __cpuinit set_multi(const struct dmi_system_id *d)
+static int set_multi(const struct dmi_system_id *d)
 {
         if (multi)
                 return 0;
@@ -2414,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
         return 0;
 }
 
-static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+static const struct dmi_system_id multi_dmi_table[] = {
         {
                 .callback = set_multi,
                 .ident = "IBM System Summit2",
@@ -2426,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
         {}
 };
 
-static void __cpuinit dmi_check_multi(void)
+static void dmi_check_multi(void)
 {
         if (multi_checked)
                 return;
@@ -2443,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
  * multi-chassis.
  * Use DMI to check them
  */
-__cpuinit int apic_is_clustered_box(void)
+int apic_is_clustered_box(void)
 {
         dmi_check_multi();
         if (multi)
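Note: the apic.c interrupt changes above all follow one shape: the handler body is split out into a static `__smp_*()` helper, and two thin entry points wrap it between entering_irq()/exiting_irq() — a plain one, and an `smp_trace_*()` variant that additionally brackets the body with entry/exit tracepoints, so the untraced fast path pays no tracing cost. A self-contained userspace sketch of that shape (all names and the vector number are stand-ins, not the kernel API):

    #include <stdio.h>

    /* Stand-ins for entering_irq()/exiting_irq() and the tracepoints. */
    static void entering_irq(void) { puts("irq_enter"); }
    static void exiting_irq(void)  { puts("irq_exit"); }
    static void trace_entry(int v) { printf("trace entry, vector 0x%x\n", v); }
    static void trace_exit(int v)  { printf("trace exit, vector 0x%x\n", v); }

    #define TIMER_VECTOR 0xef

    /* The shared body: does the real work, knows nothing about tracing. */
    static inline void __timer_interrupt(void)
    {
            puts("handle timer");
    }

    /* Plain entry point: no tracepoint overhead on the fast path. */
    void timer_interrupt(void)
    {
            entering_irq();
            __timer_interrupt();
            exiting_irq();
    }

    /* Traced entry point: same body bracketed by entry/exit tracepoints. */
    void trace_timer_interrupt(void)
    {
            entering_irq();
            trace_entry(TIMER_VECTOR);
            __timer_interrupt();
            trace_exit(TIMER_VECTOR);
            exiting_irq();
    }

    int main(void)
    {
            timer_interrupt();
            trace_timer_interrupt();
            return 0;
    }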
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
         return initial_apic_id >> index_msb;
 }
 
-static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
         union numachip_csr_g3_ext_irq_gen int_gen;
 
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
  */
 
 
-static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {
         unsigned long vect = 0, psaival = 0;
 
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
         }
 }
 
-void __cpuinit numaq_tsc_disable(void)
+void numaq_tsc_disable(void)
 {
         if (!found_numaq)
                 return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-static int __cpuinit
+static int
 update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
         unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 794f6eb54cd3..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -25,6 +25,7 @@
 #include <linux/kdebug.h>
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
+#include <linux/reboot.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -36,7 +37,6 @@
 #include <asm/ipi.h>
 #include <asm/smp.h>
 #include <asm/x86_init.h>
-#include <asm/emergency-restart.h>
 #include <asm/nmi.h>
 
 /* BMC sets a bit this MMR non-zero before sending an NMI */
@@ -51,6 +51,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
+static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
+static u64 gru_dist_lmask, gru_dist_umask;
 static union uvh_apicid uvh_apicid;
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
@@ -72,7 +74,20 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr)
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
-        return start >= gru_start_paddr && end <= gru_end_paddr;
+        if (gru_dist_base) {
+                u64 su = start & gru_dist_umask; /* upper (incl pnode) bits */
+                u64 sl = start & gru_dist_lmask; /* base offset bits */
+                u64 eu = end & gru_dist_umask;
+                u64 el = end & gru_dist_lmask;
+
+                /* Must reside completely within a single GRU range */
+                return (sl == gru_dist_base && el == gru_dist_base &&
+                        su >= gru_first_node_paddr &&
+                        su <= gru_last_node_paddr &&
+                        eu == su);
+        } else {
+                return start >= gru_start_paddr && end <= gru_end_paddr;
+        }
 }
 
 static bool uv_is_untracked_pat_range(u64 start, u64 end)
@@ -194,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
         unsigned long val;
@@ -401,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
         .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
 };
 
-static __cpuinit void set_x2apic_extra_bits(int pnode)
+static void set_x2apic_extra_bits(int pnode)
 {
         __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
@@ -463,26 +478,63 @@ static __init void map_high(char *id, unsigned long base, int pshift,
                 pr_info("UV: Map %s_HI base address NULL\n", id);
                 return;
         }
-        pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
+        pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
         if (map_type == map_uc)
                 init_extra_mapping_uc(paddr, bytes);
         else
                 init_extra_mapping_wb(paddr, bytes);
 }
 
+static __init void map_gru_distributed(unsigned long c)
+{
+        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
+        u64 paddr;
+        unsigned long bytes;
+        int nid;
+
+        gru.v = c;
+        /* only base bits 42:28 relevant in dist mode */
+        gru_dist_base = gru.v & 0x000007fff0000000UL;
+        if (!gru_dist_base) {
+                pr_info("UV: Map GRU_DIST base address NULL\n");
+                return;
+        }
+        bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
+        gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
+        gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
+        gru_dist_base &= gru_dist_lmask; /* Clear bits above M */
+        for_each_online_node(nid) {
+                paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
+                                gru_dist_base;
+                init_extra_mapping_wb(paddr, bytes);
+                gru_first_node_paddr = min(paddr, gru_first_node_paddr);
+                gru_last_node_paddr = max(paddr, gru_last_node_paddr);
+        }
+        /* Save upper (63:M) bits of address only for is_GRU_range */
+        gru_first_node_paddr &= gru_dist_umask;
+        gru_last_node_paddr &= gru_dist_umask;
+        pr_debug("UV: Map GRU_DIST base 0x%016llx 0x%016llx - 0x%016llx\n",
+                gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
+}
+
 static __init void map_gru_high(int max_pnode)
 {
         union uvh_rh_gam_gru_overlay_config_mmr_u gru;
         int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
 
         gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
-        if (gru.s.enable) {
-                map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
-                gru_start_paddr = ((u64)gru.s.base << shift);
-                gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
-        } else {
+        if (!gru.s.enable) {
                 pr_info("UV: GRU disabled\n");
+                return;
+        }
+
+        if (is_uv3_hub() && gru.s3.mode) {
+                map_gru_distributed(gru.v);
+                return;
         }
+        map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
+        gru_start_paddr = ((u64)gru.s.base << shift);
+        gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 }
 
 static __init void map_mmr_high(int max_pnode)
@@ -683,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
         mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
-static void __cpuinit uv_heartbeat_enable(int cpu)
+static void uv_heartbeat_enable(int cpu)
 {
         while (!uv_cpu_hub_info(cpu)->scir.enabled) {
                 struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -700,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
+static void uv_heartbeat_disable(int cpu)
 {
         if (uv_cpu_hub_info(cpu)->scir.enabled) {
                 uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -712,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
 /*
  * cpu hotplug notifier
  */
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
-                                        unsigned long action, void *hcpu)
+static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
+                              void *hcpu)
 {
         long cpu = (long)hcpu;
 
@@ -783,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
  * Called on each cpu to initialize the per_cpu UV data area.
  * FIXME: hotplug not supported yet
  */
-void __cpuinit uv_cpu_init(void)
+void uv_cpu_init(void)
 {
         /* CPU 0 initilization will be done via uv_system_init. */
         if (!uv_blade_info)
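Note: the distributed-mode is_GRU_range() test above splits each physical address into an upper part (node bits 63:M, via gru_dist_umask) and a lower part (offset bits below M, via gru_dist_lmask), then requires that both endpoints carry the common GRU base in the low bits and the same, in-range node id in the high bits. The following self-contained sketch reproduces that mask check; the bit layout and all constants are invented for the demo and do not match real UV hardware:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: node id in bits 63:M, per-node offset in M-1:0. */
    #define M_VAL      36
    #define WINDOW     (1ULL << 28)                 /* per-node GRU window  */
    #define LMASK      (((1ULL << M_VAL) - 1) & ~(WINDOW - 1))
    #define UMASK      (~((1ULL << M_VAL) - 1))     /* node bits            */
    #define GRU_BASE   0x0000000700000000ULL        /* low-bits GRU base    */
    #define FIRST_NODE (1ULL << M_VAL)              /* lowest mapped node   */
    #define LAST_NODE  (3ULL << M_VAL)              /* highest mapped node  */

    static bool is_gru_range(uint64_t start, uint64_t end)
    {
            uint64_t su = start & UMASK, sl = start & LMASK;
            uint64_t eu = end & UMASK, el = end & LMASK;

            /* Both endpoints must sit in the GRU window of one node. */
            return sl == GRU_BASE && el == GRU_BASE &&
                   su >= FIRST_NODE && su <= LAST_NODE && eu == su;
    }

    int main(void)
    {
            uint64_t a = (2ULL << M_VAL) | GRU_BASE;  /* node 2 GRU base */

            printf("%d\n", is_gru_range(a, a + 0x1000));              /* 1 */
            printf("%d\n", is_gru_range(a, a | (1ULL << (M_VAL + 2)))); /* 0 */
            return 0;
    }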
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 0ef4bba2acb7..d67c4be3e8b1 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -28,7 +28,6 @@ void foo(void)
         OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
         OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
         OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
-        OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
         OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
         OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
         OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b0684e4a73aa..47b56a7e99cb 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -31,11 +31,15 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
+ifdef CONFIG_AMD_IOMMU
+obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
+endif
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
 endif
 
+
 obj-$(CONFIG_X86_MCE) += mcheck/
 obj-$(CONFIG_MTRR) += mtrr/
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5013a48d1aff..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
 {
 /*
  * General Systems BIOSen alias the cpu frequency registers
@@ -87,10 +87,10 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
 {
         u32 l, h;
-        int mbytes = num_physpages >> (20-PAGE_SHIFT);
+        int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 
         if (c->x86_model < 6) {
                 /* Based on AMD doc 20734R - June 2000 */
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
         }
 }
 
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
         /* calling is from identify_secondary_cpu() ? */
         if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
         add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
 }
 
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
         u32 l, h;
 
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
  * To workaround broken NUMA config. Read the comment in
  * srat_detect_node().
  */
-static int __cpuinit nearby_node(int apicid)
+static int nearby_node(int apicid)
 {
         int i, node;
 
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
  * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
 {
         u32 nodes, cores_per_cu = 1;
         u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
  */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
         unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
         int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
         unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
         }
 }
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void early_init_amd(struct cpuinfo_x86 *c)
 {
         early_init_amd_mc(c);
 
@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(const int *erratum);
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static void init_amd(struct cpuinfo_x86 *c)
 {
         u32 dummy;
         unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
-                                             unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
         /* AMD errata T13 (order #21922) */
         if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 }
 #endif
 
-static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
         tlb_flushall_shift = 5;
 
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
                 tlb_flushall_shift = 4;
 }
 
-static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 {
         u32 ebx, eax, ecx, edx;
         u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
         cpu_set_tlb_flushall_shift(c);
 }
 
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static const struct cpu_dev amd_cpu_dev = {
         .c_vendor = "AMD",
         .c_ident = { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
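Note: amd_cpu_dev above (and centaur_cpu_dev further down) is one entry in the vendor-dispatch scheme visible throughout this series: each vendor file fills a `struct cpu_dev` of callbacks keyed by its CPUID vendor string, and common code calls through whichever table matches. A reduced userspace sketch of that registration-and-dispatch pattern, with invented structures that only loosely model the kernel's:

    #include <stdio.h>
    #include <string.h>

    struct cpuinfo { char vendor_id[16]; };

    /* Reduced vendor callback table, loosely modeled on struct cpu_dev. */
    struct cpu_dev {
            const char *c_vendor;
            const char *c_ident;                 /* CPUID vendor string */
            void (*c_init)(struct cpuinfo *c);
    };

    static void amd_init(struct cpuinfo *c)     { (void)c; puts("AMD init"); }
    static void centaur_init(struct cpuinfo *c) { (void)c; puts("Centaur init"); }

    static const struct cpu_dev amd_dev     = { "AMD", "AuthenticAMD", amd_init };
    static const struct cpu_dev centaur_dev = { "Centaur", "CentaurHauls", centaur_init };

    static const struct cpu_dev *cpu_devs[] = { &amd_dev, &centaur_dev };

    /* Common code: find the table whose ident matches and dispatch. */
    static void identify_cpu(struct cpuinfo *c)
    {
            for (size_t i = 0; i < sizeof(cpu_devs) / sizeof(cpu_devs[0]); i++) {
                    if (!strcmp(c->vendor_id, cpu_devs[i]->c_ident)) {
                            cpu_devs[i]->c_init(c);
                            return;
                    }
            }
            puts("Unknown vendor");
    }

    int main(void)
    {
            struct cpuinfo c = { "AuthenticAMD" };
            identify_cpu(&c);
            return 0;
    }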
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4112be9a4659..03445346ee0a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_387(char *s)
-{
-        boot_cpu_data.hard_math = 0;
-        write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
-        return 1;
-}
-
-__setup("no387", no_387);
-
 static double __initdata x = 4195835.0;
 static double __initdata y = 3145727.0;
 
@@ -44,15 +35,6 @@ static void __init check_fpu(void)
 {
         s32 fdiv_bug;
 
-        if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-                pr_emerg("No coprocessor found and no math emulation present\n");
-                pr_emerg("Giving up\n");
-                for (;;) ;
-#endif
-                return;
-        }
-
         kernel_fpu_begin();
 
         /*
@@ -107,5 +89,6 @@ void __init check_bugs(void)
          * kernel_fpu_begin/end() in check_fpu() relies on the patched
          * alternative instructions.
          */
-        check_fpu();
+        if (cpu_has_fpu)
+                check_fpu();
 }
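Note: the `x = 4195835.0` and `y = 3145727.0` kept by this hunk are the classic Pentium FDIV test operands: a correct FPU makes x - (x/y) * y (near-)zero, while the flawed Pentium divider returns a residue of roughly 256. The kernel performs this probe in x87 assembly inside check_fpu(); the following is merely a userspace approximation of the same idea:

    #include <stdio.h>

    /* Classic FDIV-bug probe: (near-)zero on a correct FPU, a residue
     * of about 256 on the affected Pentium steppings.  The threshold
     * of 1.0 tolerates ordinary double rounding error. */
    static int fdiv_bug_present(void)
    {
            volatile double x = 4195835.0;      /* volatile: keep the  */
            volatile double y = 3145727.0;      /* division on the FPU */

            return (x - (x / y) * y) > 1.0;
    }

    int main(void)
    {
            printf("FDIV bug: %s\n", fdiv_bug_present() ? "present" : "absent");
            return 0;
    }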
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@
 
 #ifdef CONFIG_X86_OOSTORE
 
-static u32 __cpuinit power2(u32 x)
+static u32 power2(u32 x)
 {
         u32 s = 1;
 
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
 /*
  * Set up an actual MCR
  */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
         u32 lo, hi;
 
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
  *
  * Shortcut: We know you can't put 4Gig of RAM on a winchip
  */
-static u32 __cpuinit ramtop(void)
+static u32 ramtop(void)
 {
         u32 clip = 0xFFFFFFFFUL;
         u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
 /*
  * Compute a set of MCR's to give maximum coverage
  */
-static int __cpuinit centaur_mcr_compute(int nr, int key)
+static int centaur_mcr_compute(int nr, int key)
 {
         u32 mem = ramtop();
         u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
         return ct;
 }
 
-static void __cpuinit centaur_create_optimal_mcr(void)
+static void centaur_create_optimal_mcr(void)
 {
         int used;
         int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
                 wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
-static void __cpuinit winchip2_create_optimal_mcr(void)
+static void winchip2_create_optimal_mcr(void)
 {
         u32 lo, hi;
         int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 /*
  * Handle the MCR key on the Winchip 2.
  */
-static void __cpuinit winchip2_unprotect_mcr(void)
+static void winchip2_unprotect_mcr(void)
 {
         u32 lo, hi;
         u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
         wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 
-static void __cpuinit winchip2_protect_mcr(void)
+static void winchip2_protect_mcr(void)
 {
         u32 lo, hi;
 
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
 #define RNG_ENABLED (1 << 3)
 #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
 
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+static void init_c3(struct cpuinfo_x86 *c)
 {
         u32 lo, hi;
 
@@ -318,7 +318,7 @@ enum {
         EAMD3D = 1<<20,
 };
 
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+static void early_init_centaur(struct cpuinfo_x86 *c)
 {
         switch (c->x86) {
 #ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void init_centaur(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
         char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 #endif
 }
 
-static unsigned int __cpuinit
+static unsigned int
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 #ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
         return size;
 }
 
-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
+static const struct cpu_dev centaur_cpu_dev = {
         .c_vendor = "Centaur",
         .c_ident = { "CentaurHauls" },
         .c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 22018f70a671..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
         alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
         cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
         .c_init = default_init,
         .c_vendor = "Unknown",
         .c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
 
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
 
 static int __init cachesize_setup(char *str)
 {
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
 }
 
 /* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
         return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
         unsigned long lo, hi;
 
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
         u32 level;
 };
 
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
         { X86_FEATURE_MWAIT, 0x00000005 },
         { X86_FEATURE_DCA, 0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
         { 0, 0 }
 };
 
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
         const struct cpuid_dependent_feature *df;
 
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
  */
 
 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
         const struct cpu_model_info *info;
 
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
         return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
         load_percpu_segment(cpu);
 }
 
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
         unsigned int *v;
         char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
         }
 }
 
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
         unsigned int n, dummy, ebx, ecx, edx, l2size;
 
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
  */
 s8 __read_mostly tlb_flushall_shift = -1;
 
-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
         if (this_cpu->c_detect_tlb)
                 this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
                 tlb_flushall_shift);
 }
 
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
         u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
 #endif
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
         char *v = c->x86_vendor_id;
         int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
         this_cpu = &default_cpu;
 }
 
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
         /* Get vendor name */
         cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
         }
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
         u32 tfms, xlvl;
         u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
         init_scattered_cpuid_features(c);
 }
 
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
         int i;
@@ -711,10 +711,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                 return;
 
         cpu_detect(c);
-
         get_cpu_vendor(c);
-
         get_cpu_cap(c);
+        fpu_detect(c);
 
         if (this_cpu->c_early_init)
720 | this_cpu->c_early_init(c); | 719 | this_cpu->c_early_init(c); |
@@ -724,6 +723,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
724 | 723 | ||
725 | if (this_cpu->c_bsp_init) | 724 | if (this_cpu->c_bsp_init) |
726 | this_cpu->c_bsp_init(c); | 725 | this_cpu->c_bsp_init(c); |
726 | |||
727 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); | ||
727 | } | 728 | } |
728 | 729 | ||
729 | void __init early_cpu_init(void) | 730 | void __init early_cpu_init(void) |
@@ -768,7 +769,7 @@ void __init early_cpu_init(void) | |||
768 | * unless we can find a reliable way to detect all the broken cases. | 769 | * unless we can find a reliable way to detect all the broken cases. |
769 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). | 770 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). |
770 | */ | 771 | */ |
771 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | 772 | static void detect_nopl(struct cpuinfo_x86 *c) |
772 | { | 773 | { |
773 | #ifdef CONFIG_X86_32 | 774 | #ifdef CONFIG_X86_32 |
774 | clear_cpu_cap(c, X86_FEATURE_NOPL); | 775 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
@@ -777,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
777 | #endif | 778 | #endif |
778 | } | 779 | } |
779 | 780 | ||
780 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 781 | static void generic_identify(struct cpuinfo_x86 *c) |
781 | { | 782 | { |
782 | c->extended_cpuid_level = 0; | 783 | c->extended_cpuid_level = 0; |
783 | 784 | ||
@@ -814,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
814 | /* | 815 | /* |
815 | * This does the hard work of actually picking apart the CPU stuff... | 816 | * This does the hard work of actually picking apart the CPU stuff... |
816 | */ | 817 | */ |
817 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | 818 | static void identify_cpu(struct cpuinfo_x86 *c) |
818 | { | 819 | { |
819 | int i; | 820 | int i; |
820 | 821 | ||
@@ -959,7 +960,7 @@ void __init identify_boot_cpu(void) | |||
959 | cpu_detect_tlb(&boot_cpu_data); | 960 | cpu_detect_tlb(&boot_cpu_data); |
960 | } | 961 | } |
961 | 962 | ||
962 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 963 | void identify_secondary_cpu(struct cpuinfo_x86 *c) |
963 | { | 964 | { |
964 | BUG_ON(c == &boot_cpu_data); | 965 | BUG_ON(c == &boot_cpu_data); |
965 | identify_cpu(c); | 966 | identify_cpu(c); |
@@ -974,14 +975,14 @@ struct msr_range { | |||
974 | unsigned max; | 975 | unsigned max; |
975 | }; | 976 | }; |
976 | 977 | ||
977 | static const struct msr_range msr_range_array[] __cpuinitconst = { | 978 | static const struct msr_range msr_range_array[] = { |
978 | { 0x00000000, 0x00000418}, | 979 | { 0x00000000, 0x00000418}, |
979 | { 0xc0000000, 0xc000040b}, | 980 | { 0xc0000000, 0xc000040b}, |
980 | { 0xc0010000, 0xc0010142}, | 981 | { 0xc0010000, 0xc0010142}, |
981 | { 0xc0011000, 0xc001103b}, | 982 | { 0xc0011000, 0xc001103b}, |
982 | }; | 983 | }; |
983 | 984 | ||
984 | static void __cpuinit __print_cpu_msr(void) | 985 | static void __print_cpu_msr(void) |
985 | { | 986 | { |
986 | unsigned index_min, index_max; | 987 | unsigned index_min, index_max; |
987 | unsigned index; | 988 | unsigned index; |
@@ -1000,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void) | |||
1000 | } | 1001 | } |
1001 | } | 1002 | } |
1002 | 1003 | ||
1003 | static int show_msr __cpuinitdata; | 1004 | static int show_msr; |
1004 | 1005 | ||
1005 | static __init int setup_show_msr(char *arg) | 1006 | static __init int setup_show_msr(char *arg) |
1006 | { | 1007 | { |
@@ -1021,7 +1022,7 @@ static __init int setup_noclflush(char *arg) | |||
1021 | } | 1022 | } |
1022 | __setup("noclflush", setup_noclflush); | 1023 | __setup("noclflush", setup_noclflush); |
1023 | 1024 | ||
1024 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 1025 | void print_cpu_info(struct cpuinfo_x86 *c) |
1025 | { | 1026 | { |
1026 | const char *vendor = NULL; | 1027 | const char *vendor = NULL; |
1027 | 1028 | ||
@@ -1050,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
1050 | print_cpu_msr(c); | 1051 | print_cpu_msr(c); |
1051 | } | 1052 | } |
1052 | 1053 | ||
1053 | void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) | 1054 | void print_cpu_msr(struct cpuinfo_x86 *c) |
1054 | { | 1055 | { |
1055 | if (c->cpu_index < show_msr) | 1056 | if (c->cpu_index < show_msr) |
1056 | __print_cpu_msr(); | 1057 | __print_cpu_msr(); |
@@ -1071,8 +1072,8 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
1071 | 1072 | ||
1072 | #ifdef CONFIG_X86_64 | 1073 | #ifdef CONFIG_X86_64 |
1073 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; | 1074 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
1074 | struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, | 1075 | struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, |
1075 | (unsigned long) nmi_idt_table }; | 1076 | (unsigned long) debug_idt_table }; |
1076 | 1077 | ||
1077 | DEFINE_PER_CPU_FIRST(union irq_stack_union, | 1078 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
1078 | irq_stack_union) __aligned(PAGE_SIZE); | 1079 | irq_stack_union) __aligned(PAGE_SIZE); |
@@ -1148,20 +1149,20 @@ int is_debug_stack(unsigned long addr) | |||
1148 | addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); | 1149 | addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); |
1149 | } | 1150 | } |
1150 | 1151 | ||
1151 | static DEFINE_PER_CPU(u32, debug_stack_use_ctr); | 1152 | DEFINE_PER_CPU(u32, debug_idt_ctr); |
1152 | 1153 | ||
1153 | void debug_stack_set_zero(void) | 1154 | void debug_stack_set_zero(void) |
1154 | { | 1155 | { |
1155 | this_cpu_inc(debug_stack_use_ctr); | 1156 | this_cpu_inc(debug_idt_ctr); |
1156 | load_idt((const struct desc_ptr *)&nmi_idt_descr); | 1157 | load_current_idt(); |
1157 | } | 1158 | } |
1158 | 1159 | ||
1159 | void debug_stack_reset(void) | 1160 | void debug_stack_reset(void) |
1160 | { | 1161 | { |
1161 | if (WARN_ON(!this_cpu_read(debug_stack_use_ctr))) | 1162 | if (WARN_ON(!this_cpu_read(debug_idt_ctr))) |
1162 | return; | 1163 | return; |
1163 | if (this_cpu_dec_return(debug_stack_use_ctr) == 0) | 1164 | if (this_cpu_dec_return(debug_idt_ctr) == 0) |
1164 | load_idt((const struct desc_ptr *)&idt_descr); | 1165 | load_current_idt(); |
1165 | } | 1166 | } |
1166 | 1167 | ||
1167 | #else /* CONFIG_X86_64 */ | 1168 | #else /* CONFIG_X86_64 */ |
@@ -1215,7 +1216,7 @@ static void dbg_restore_debug_regs(void) | |||
1215 | */ | 1216 | */ |
1216 | #ifdef CONFIG_X86_64 | 1217 | #ifdef CONFIG_X86_64 |
1217 | 1218 | ||
1218 | void __cpuinit cpu_init(void) | 1219 | void cpu_init(void) |
1219 | { | 1220 | { |
1220 | struct orig_ist *oist; | 1221 | struct orig_ist *oist; |
1221 | struct task_struct *me; | 1222 | struct task_struct *me; |
@@ -1257,7 +1258,7 @@ void __cpuinit cpu_init(void) | |||
1257 | switch_to_new_gdt(cpu); | 1258 | switch_to_new_gdt(cpu); |
1258 | loadsegment(fs, 0); | 1259 | loadsegment(fs, 0); |
1259 | 1260 | ||
1260 | load_idt((const struct desc_ptr *)&idt_descr); | 1261 | load_current_idt(); |
1261 | 1262 | ||
1262 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | 1263 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); |
1263 | syscall_init(); | 1264 | syscall_init(); |
@@ -1314,7 +1315,7 @@ void __cpuinit cpu_init(void) | |||
1314 | 1315 | ||
1315 | #else | 1316 | #else |
1316 | 1317 | ||
1317 | void __cpuinit cpu_init(void) | 1318 | void cpu_init(void) |
1318 | { | 1319 | { |
1319 | int cpu = smp_processor_id(); | 1320 | int cpu = smp_processor_id(); |
1320 | struct task_struct *curr = current; | 1321 | struct task_struct *curr = current; |
@@ -1334,7 +1335,7 @@ void __cpuinit cpu_init(void) | |||
1334 | if (cpu_has_vme || cpu_has_tsc || cpu_has_de) | 1335 | if (cpu_has_vme || cpu_has_tsc || cpu_has_de) |
1335 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | 1336 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
1336 | 1337 | ||
1337 | load_idt(&idt_descr); | 1338 | load_current_idt(); |
1338 | switch_to_new_gdt(cpu); | 1339 | switch_to_new_gdt(cpu); |
1339 | 1340 | ||
1340 | /* | 1341 | /* |
@@ -1363,3 +1364,17 @@ void __cpuinit cpu_init(void) | |||
1363 | fpu_init(); | 1364 | fpu_init(); |
1364 | } | 1365 | } |
1365 | #endif | 1366 | #endif |
1367 | |||
1368 | #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS | ||
1369 | void warn_pre_alternatives(void) | ||
1370 | { | ||
1371 | WARN(1, "You're using static_cpu_has before alternatives have run!\n"); | ||
1372 | } | ||
1373 | EXPORT_SYMBOL_GPL(warn_pre_alternatives); | ||
1374 | #endif | ||
1375 | |||
1376 | inline bool __static_cpu_has_safe(u16 bit) | ||
1377 | { | ||
1378 | return boot_cpu_has(bit); | ||
1379 | } | ||
1380 | EXPORT_SYMBOL_GPL(__static_cpu_has_safe); | ||
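The warn_pre_alternatives()/__static_cpu_has_safe() pair added above backs the static_cpu_has() machinery: static_cpu_has() is patched by the alternatives framework at boot, so calling it earlier returns an unpatched (possibly wrong) answer, while the _safe variant simply falls back to a runtime boot_cpu_has() test. A minimal sketch of that contract, written in plain C with a hypothetical alternatives_done flag standing in for the real self-patching asm:

/*
 * Illustrative only: the real static_cpu_has() is self-patching asm in
 * <asm/cpufeature.h>, not plain C. alternatives_done is an assumed flag.
 */
static bool alternatives_done;	/* assume: set after apply_alternatives() */

static inline bool sketch_static_cpu_has(u16 bit)
{
	if (IS_ENABLED(CONFIG_X86_DEBUG_STATIC_CPU_HAS) && !alternatives_done)
		warn_pre_alternatives();	/* too early: branch not patched yet */
	return boot_cpu_has(bit);		/* stand-in for the patched branch */
}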
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index d048d5ca43c1..d0969c75ab54 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -15,7 +15,7 @@ | |||
15 | /* | 15 | /* |
16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
17 | */ | 17 | */ |
18 | static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 18 | static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | 21 | ||
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
44 | } | 44 | } |
45 | } | 45 | } |
46 | 46 | ||
47 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 47 | static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
48 | { | 48 | { |
49 | unsigned long flags; | 49 | unsigned long flags; |
50 | 50 | ||
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
59 | * Actually since bugs.h doesn't even reference this perhaps someone should | 59 | * Actually since bugs.h doesn't even reference this perhaps someone should |
60 | * fix the documentation ??? | 60 | * fix the documentation ??? |
61 | */ | 61 | */ |
62 | static unsigned char Cx86_dir0_msb __cpuinitdata = 0; | 62 | static unsigned char Cx86_dir0_msb = 0; |
63 | 63 | ||
64 | static const char __cpuinitconst Cx86_model[][9] = { | 64 | static const char Cx86_model[][9] = { |
65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", | 65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", |
66 | "M II ", "Unknown" | 66 | "M II ", "Unknown" |
67 | }; | 67 | }; |
68 | static const char __cpuinitconst Cx486_name[][5] = { | 68 | static const char Cx486_name[][5] = { |
69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", | 69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", |
70 | "SRx2", "DRx2" | 70 | "SRx2", "DRx2" |
71 | }; | 71 | }; |
72 | static const char __cpuinitconst Cx486S_name[][4] = { | 72 | static const char Cx486S_name[][4] = { |
73 | "S", "S2", "Se", "S2e" | 73 | "S", "S2", "Se", "S2e" |
74 | }; | 74 | }; |
75 | static const char __cpuinitconst Cx486D_name[][4] = { | 75 | static const char Cx486D_name[][4] = { |
76 | "DX", "DX2", "?", "?", "?", "DX4" | 76 | "DX", "DX2", "?", "?", "?", "DX4" |
77 | }; | 77 | }; |
78 | static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; | 78 | static char Cx86_cb[] = "?.5x Core/Bus Clock"; |
79 | static const char __cpuinitconst cyrix_model_mult1[] = "12??43"; | 79 | static const char cyrix_model_mult1[] = "12??43"; |
80 | static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; | 80 | static const char cyrix_model_mult2[] = "12233445"; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old | 83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old |
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; | |||
87 | * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP | 87 | * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP |
88 | */ | 88 | */ |
89 | 89 | ||
90 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | 90 | static void check_cx686_slop(struct cpuinfo_x86 *c) |
91 | { | 91 | { |
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | 93 | ||
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | 114 | ||
115 | static void __cpuinit set_cx86_reorder(void) | 115 | static void set_cx86_reorder(void) |
116 | { | 116 | { |
117 | u8 ccr3; | 117 | u8 ccr3; |
118 | 118 | ||
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
127 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
128 | } | 128 | } |
129 | 129 | ||
130 | static void __cpuinit set_cx86_memwb(void) | 130 | static void set_cx86_memwb(void) |
131 | { | 131 | { |
132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
133 | 133 | ||
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void) | |||
143 | * Configure later MediaGX and/or Geode processor. | 143 | * Configure later MediaGX and/or Geode processor. |
144 | */ | 144 | */ |
145 | 145 | ||
146 | static void __cpuinit geode_configure(void) | 146 | static void geode_configure(void) |
147 | { | 147 | { |
148 | unsigned long flags; | 148 | unsigned long flags; |
149 | u8 ccr3; | 149 | u8 ccr3; |
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void) | |||
166 | local_irq_restore(flags); | 166 | local_irq_restore(flags); |
167 | } | 167 | } |
168 | 168 | ||
169 | static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | 169 | static void early_init_cyrix(struct cpuinfo_x86 *c) |
170 | { | 170 | { |
171 | unsigned char dir0, dir0_msn, dir1 = 0; | 171 | unsigned char dir0, dir0_msn, dir1 = 0; |
172 | 172 | ||
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | |||
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | 188 | static void init_cyrix(struct cpuinfo_x86 *c) |
189 | { | 189 | { |
190 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; | 190 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; |
191 | char *buf = c->x86_model_id; | 191 | char *buf = c->x86_model_id; |
@@ -333,7 +333,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
333 | switch (dir0_lsn) { | 333 | switch (dir0_lsn) { |
334 | case 0xd: /* either a 486SLC or DLC w/o DEVID */ | 334 | case 0xd: /* either a 486SLC or DLC w/o DEVID */ |
335 | dir0_msn = 0; | 335 | dir0_msn = 0; |
336 | p = Cx486_name[(c->hard_math) ? 1 : 0]; | 336 | p = Cx486_name[(cpu_has_fpu ? 1 : 0)]; |
337 | break; | 337 | break; |
338 | 338 | ||
339 | case 0xe: /* a 486S A step */ | 339 | case 0xe: /* a 486S A step */ |
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
356 | /* | 356 | /* |
357 | * Handle National Semiconductor branded processors | 357 | * Handle National Semiconductor branded processors |
358 | */ | 358 | */ |
359 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | 359 | static void init_nsc(struct cpuinfo_x86 *c) |
360 | { | 360 | { |
361 | /* | 361 | /* |
362 | * There may be GX1 processors in the wild that are branded | 362 | * There may be GX1 processors in the wild that are branded |
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void) | |||
405 | return (unsigned char) (test >> 8) == 0x02; | 405 | return (unsigned char) (test >> 8) == 0x02; |
406 | } | 406 | } |
407 | 407 | ||
408 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | 408 | static void cyrix_identify(struct cpuinfo_x86 *c) |
409 | { | 409 | { |
410 | /* Detect Cyrix with disabled CPUID */ | 410 | /* Detect Cyrix with disabled CPUID */ |
411 | if (c->x86 == 4 && test_cyrix_52div()) { | 411 | if (c->x86 == 4 && test_cyrix_52div()) { |
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
441 | } | 441 | } |
442 | } | 442 | } |
443 | 443 | ||
444 | static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { | 444 | static const struct cpu_dev cyrix_cpu_dev = { |
445 | .c_vendor = "Cyrix", | 445 | .c_vendor = "Cyrix", |
446 | .c_ident = { "CyrixInstead" }, | 446 | .c_ident = { "CyrixInstead" }, |
447 | .c_early_init = early_init_cyrix, | 447 | .c_early_init = early_init_cyrix, |
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { | |||
452 | 452 | ||
453 | cpu_dev_register(cyrix_cpu_dev); | 453 | cpu_dev_register(cyrix_cpu_dev); |
454 | 454 | ||
455 | static const struct cpu_dev __cpuinitconst nsc_cpu_dev = { | 455 | static const struct cpu_dev nsc_cpu_dev = { |
456 | .c_vendor = "NSC", | 456 | .c_vendor = "NSC", |
457 | .c_ident = { "Geode by NSC" }, | 457 | .c_ident = { "Geode by NSC" }, |
458 | .c_init = init_nsc, | 458 | .c_init = init_nsc, |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 1e7e84a02eba..87279212d318 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void) | |||
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
63 | void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) | 63 | void init_hypervisor(struct cpuinfo_x86 *c) |
64 | { | 64 | { |
65 | if (x86_hyper && x86_hyper->set_cpu_features) | 65 | if (x86_hyper && x86_hyper->set_cpu_features) |
66 | x86_hyper->set_cpu_features(c); | 66 | x86_hyper->set_cpu_features(c); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 9b0c441c03f5..ec7299566f79 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 29 | static void early_init_intel(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | u64 misc_enable; | 31 | u64 misc_enable; |
32 | 32 | ||
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
163 | * This is called before we do cpu ident work | 163 | * This is called before we do cpu ident work |
164 | */ | 164 | */ |
165 | 165 | ||
166 | int __cpuinit ppro_with_ram_bug(void) | 166 | int ppro_with_ram_bug(void) |
167 | { | 167 | { |
168 | /* Uses data from early_cpu_detect now */ | 168 | /* Uses data from early_cpu_detect now */ |
169 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 169 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void) | |||
176 | return 0; | 176 | return 0; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | 179 | static void intel_smp_check(struct cpuinfo_x86 *c) |
180 | { | 180 | { |
181 | /* calling is from identify_secondary_cpu() ? */ | 181 | /* calling is from identify_secondary_cpu() ? */ |
182 | if (!c->cpu_index) | 182 | if (!c->cpu_index) |
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 199 | static void intel_workarounds(struct cpuinfo_x86 *c) |
200 | { | 200 | { |
201 | unsigned long lo, hi; | 201 | unsigned long lo, hi; |
202 | 202 | ||
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
275 | intel_smp_check(c); | 275 | intel_smp_check(c); |
276 | } | 276 | } |
277 | #else | 277 | #else |
278 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 278 | static void intel_workarounds(struct cpuinfo_x86 *c) |
279 | { | 279 | { |
280 | } | 280 | } |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 283 | static void srat_detect_node(struct cpuinfo_x86 *c) |
284 | { | 284 | { |
285 | #ifdef CONFIG_NUMA | 285 | #ifdef CONFIG_NUMA |
286 | unsigned node; | 286 | unsigned node; |
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
300 | /* | 300 | /* |
301 | * find out the number of processor cores on the die | 301 | * find out the number of processor cores on the die |
302 | */ | 302 | */ |
303 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | 303 | static int intel_num_cpu_cores(struct cpuinfo_x86 *c) |
304 | { | 304 | { |
305 | unsigned int eax, ebx, ecx, edx; | 305 | unsigned int eax, ebx, ecx, edx; |
306 | 306 | ||
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | |||
315 | return 1; | 315 | return 1; |
316 | } | 316 | } |
317 | 317 | ||
318 | static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) | 318 | static void detect_vmx_virtcap(struct cpuinfo_x86 *c) |
319 | { | 319 | { |
320 | /* Intel VMX MSR indicated features */ | 320 | /* Intel VMX MSR indicated features */ |
321 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 | 321 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 |
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) | |||
353 | } | 353 | } |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 356 | static void init_intel(struct cpuinfo_x86 *c) |
357 | { | 357 | { |
358 | unsigned int l2 = 0; | 358 | unsigned int l2 = 0; |
359 | 359 | ||
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
472 | } | 472 | } |
473 | 473 | ||
474 | #ifdef CONFIG_X86_32 | 474 | #ifdef CONFIG_X86_32 |
475 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 475 | static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
476 | { | 476 | { |
477 | /* | 477 | /* |
478 | * Intel PIII Tualatin. This comes in two flavours. | 478 | * Intel PIII Tualatin. This comes in two flavours. |
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i | |||
506 | 506 | ||
507 | #define STLB_4K 0x41 | 507 | #define STLB_4K 0x41 |
508 | 508 | ||
509 | static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { | 509 | static const struct _tlb_table intel_tlb_table[] = { |
510 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, | 510 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, |
511 | { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, | 511 | { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, |
512 | { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, | 512 | { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, |
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { | |||
536 | { 0x00, 0, 0 } | 536 | { 0x00, 0, 0 } |
537 | }; | 537 | }; |
538 | 538 | ||
539 | static void __cpuinit intel_tlb_lookup(const unsigned char desc) | 539 | static void intel_tlb_lookup(const unsigned char desc) |
540 | { | 540 | { |
541 | unsigned char k; | 541 | unsigned char k; |
542 | if (desc == 0) | 542 | if (desc == 0) |
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc) | |||
605 | } | 605 | } |
606 | } | 606 | } |
607 | 607 | ||
608 | static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) | 608 | static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) |
609 | { | 609 | { |
610 | switch ((c->x86 << 8) + c->x86_model) { | 610 | switch ((c->x86 << 8) + c->x86_model) { |
611 | case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | 611 | case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ |
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) | |||
634 | } | 634 | } |
635 | } | 635 | } |
636 | 636 | ||
637 | static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) | 637 | static void intel_detect_tlb(struct cpuinfo_x86 *c) |
638 | { | 638 | { |
639 | int i, j, n; | 639 | int i, j, n; |
640 | unsigned int regs[4]; | 640 | unsigned int regs[4]; |
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) | |||
661 | intel_tlb_flushall_shift_set(c); | 661 | intel_tlb_flushall_shift_set(c); |
662 | } | 662 | } |
663 | 663 | ||
664 | static const struct cpu_dev __cpuinitconst intel_cpu_dev = { | 664 | static const struct cpu_dev intel_cpu_dev = { |
665 | .c_vendor = "Intel", | 665 | .c_vendor = "Intel", |
666 | .c_ident = { "GenuineIntel" }, | 666 | .c_ident = { "GenuineIntel" }, |
667 | #ifdef CONFIG_X86_32 | 667 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 7c6f7d548c0f..1414c90feaba 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -37,7 +37,7 @@ struct _cache_table { | |||
37 | /* All the cache descriptor types we care about (no TLB or | 37 | /* All the cache descriptor types we care about (no TLB or |
38 | trace cache entries) */ | 38 | trace cache entries) */ |
39 | 39 | ||
40 | static const struct _cache_table __cpuinitconst cache_table[] = | 40 | static const struct _cache_table cache_table[] = |
41 | { | 41 | { |
42 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 42 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
43 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ | 43 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
@@ -203,7 +203,7 @@ union l3_cache { | |||
203 | unsigned val; | 203 | unsigned val; |
204 | }; | 204 | }; |
205 | 205 | ||
206 | static const unsigned short __cpuinitconst assocs[] = { | 206 | static const unsigned short assocs[] = { |
207 | [1] = 1, | 207 | [1] = 1, |
208 | [2] = 2, | 208 | [2] = 2, |
209 | [4] = 4, | 209 | [4] = 4, |
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = { | |||
217 | [0xf] = 0xffff /* fully associative - no way to show this currently */ | 217 | [0xf] = 0xffff /* fully associative - no way to show this currently */ |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; | 220 | static const unsigned char levels[] = { 1, 1, 2, 3 }; |
221 | static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; | 221 | static const unsigned char types[] = { 1, 2, 3, 3 }; |
222 | 222 | ||
223 | static void __cpuinit | 223 | static void |
224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
225 | union _cpuid4_leaf_ebx *ebx, | 225 | union _cpuid4_leaf_ebx *ebx, |
226 | union _cpuid4_leaf_ecx *ecx) | 226 | union _cpuid4_leaf_ecx *ecx) |
@@ -302,7 +302,7 @@ struct _cache_attr { | |||
302 | /* | 302 | /* |
303 | * L3 cache descriptors | 303 | * L3 cache descriptors |
304 | */ | 304 | */ |
305 | static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | 305 | static void amd_calc_l3_indices(struct amd_northbridge *nb) |
306 | { | 306 | { |
307 | struct amd_l3_cache *l3 = &nb->l3_cache; | 307 | struct amd_l3_cache *l3 = &nb->l3_cache; |
308 | unsigned int sc0, sc1, sc2, sc3; | 308 | unsigned int sc0, sc1, sc2, sc3; |
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | |||
325 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 325 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
326 | } | 326 | } |
327 | 327 | ||
328 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) | 328 | static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) |
329 | { | 329 | { |
330 | int node; | 330 | int node; |
331 | 331 | ||
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches = | |||
528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ | 528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ |
529 | 529 | ||
530 | static int | 530 | static int |
531 | __cpuinit cpuid4_cache_lookup_regs(int index, | 531 | cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) |
532 | struct _cpuid4_info_regs *this_leaf) | ||
533 | { | 532 | { |
534 | union _cpuid4_leaf_eax eax; | 533 | union _cpuid4_leaf_eax eax; |
535 | union _cpuid4_leaf_ebx ebx; | 534 | union _cpuid4_leaf_ebx ebx; |
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index, | |||
560 | return 0; | 559 | return 0; |
561 | } | 560 | } |
562 | 561 | ||
563 | static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) | 562 | static int find_num_cache_leaves(struct cpuinfo_x86 *c) |
564 | { | 563 | { |
565 | unsigned int eax, ebx, ecx, edx, op; | 564 | unsigned int eax, ebx, ecx, edx, op; |
566 | union _cpuid4_leaf_eax cache_eax; | 565 | union _cpuid4_leaf_eax cache_eax; |
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) | |||
580 | return i; | 579 | return i; |
581 | } | 580 | } |
582 | 581 | ||
583 | void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) | 582 | void init_amd_cacheinfo(struct cpuinfo_x86 *c) |
584 | { | 583 | { |
585 | 584 | ||
586 | if (cpu_has_topoext) { | 585 | if (cpu_has_topoext) { |
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) | |||
593 | } | 592 | } |
594 | } | 593 | } |
595 | 594 | ||
596 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 595 | unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) |
597 | { | 596 | { |
598 | /* Cache sizes */ | 597 | /* Cache sizes */ |
599 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; | 598 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; |
@@ -618,36 +617,34 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
618 | * parameters cpuid leaf to find the cache details | 617 | * parameters cpuid leaf to find the cache details |
619 | */ | 618 | */ |
620 | for (i = 0; i < num_cache_leaves; i++) { | 619 | for (i = 0; i < num_cache_leaves; i++) { |
621 | struct _cpuid4_info_regs this_leaf; | 620 | struct _cpuid4_info_regs this_leaf = {}; |
622 | int retval; | 621 | int retval; |
623 | 622 | ||
624 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); | 623 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
625 | if (retval >= 0) { | 624 | if (retval < 0) |
626 | switch (this_leaf.eax.split.level) { | 625 | continue; |
627 | case 1: | 626 | |
628 | if (this_leaf.eax.split.type == | 627 | switch (this_leaf.eax.split.level) { |
629 | CACHE_TYPE_DATA) | 628 | case 1: |
630 | new_l1d = this_leaf.size/1024; | 629 | if (this_leaf.eax.split.type == CACHE_TYPE_DATA) |
631 | else if (this_leaf.eax.split.type == | 630 | new_l1d = this_leaf.size/1024; |
632 | CACHE_TYPE_INST) | 631 | else if (this_leaf.eax.split.type == CACHE_TYPE_INST) |
633 | new_l1i = this_leaf.size/1024; | 632 | new_l1i = this_leaf.size/1024; |
634 | break; | 633 | break; |
635 | case 2: | 634 | case 2: |
636 | new_l2 = this_leaf.size/1024; | 635 | new_l2 = this_leaf.size/1024; |
637 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 636 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
638 | index_msb = get_count_order(num_threads_sharing); | 637 | index_msb = get_count_order(num_threads_sharing); |
639 | l2_id = c->apicid & ~((1 << index_msb) - 1); | 638 | l2_id = c->apicid & ~((1 << index_msb) - 1); |
640 | break; | 639 | break; |
641 | case 3: | 640 | case 3: |
642 | new_l3 = this_leaf.size/1024; | 641 | new_l3 = this_leaf.size/1024; |
643 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 642 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
644 | index_msb = get_count_order( | 643 | index_msb = get_count_order(num_threads_sharing); |
645 | num_threads_sharing); | 644 | l3_id = c->apicid & ~((1 << index_msb) - 1); |
646 | l3_id = c->apicid & ~((1 << index_msb) - 1); | 645 | break; |
647 | break; | 646 | default: |
648 | default: | 647 | break; |
649 | break; | ||
650 | } | ||
651 | } | 648 | } |
652 | } | 649 | } |
653 | } | 650 | } |
@@ -746,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | |||
746 | 743 | ||
747 | #ifdef CONFIG_SMP | 744 | #ifdef CONFIG_SMP |
748 | 745 | ||
749 | static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | 746 | static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) |
750 | { | 747 | { |
751 | struct _cpuid4_info *this_leaf; | 748 | struct _cpuid4_info *this_leaf; |
752 | int i, sibling; | 749 | int i, sibling; |
@@ -795,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | |||
795 | return 1; | 792 | return 1; |
796 | } | 793 | } |
797 | 794 | ||
798 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 795 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) |
799 | { | 796 | { |
800 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 797 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
801 | unsigned long num_threads_sharing; | 798 | unsigned long num_threads_sharing; |
@@ -830,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
830 | } | 827 | } |
831 | } | 828 | } |
832 | } | 829 | } |
833 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | 830 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) |
834 | { | 831 | { |
835 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 832 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
836 | int sibling; | 833 | int sibling; |
@@ -843,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
843 | } | 840 | } |
844 | } | 841 | } |
845 | #else | 842 | #else |
846 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 843 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) |
847 | { | 844 | { |
848 | } | 845 | } |
849 | 846 | ||
850 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | 847 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) |
851 | { | 848 | { |
852 | } | 849 | } |
853 | #endif | 850 | #endif |
854 | 851 | ||
855 | static void __cpuinit free_cache_attributes(unsigned int cpu) | 852 | static void free_cache_attributes(unsigned int cpu) |
856 | { | 853 | { |
857 | int i; | 854 | int i; |
858 | 855 | ||
@@ -863,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
863 | per_cpu(ici_cpuid4_info, cpu) = NULL; | 860 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
864 | } | 861 | } |
865 | 862 | ||
866 | static void __cpuinit get_cpu_leaves(void *_retval) | 863 | static void get_cpu_leaves(void *_retval) |
867 | { | 864 | { |
868 | int j, *retval = _retval, cpu = smp_processor_id(); | 865 | int j, *retval = _retval, cpu = smp_processor_id(); |
869 | 866 | ||
@@ -883,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval) | |||
883 | } | 880 | } |
884 | } | 881 | } |
885 | 882 | ||
886 | static int __cpuinit detect_cache_attributes(unsigned int cpu) | 883 | static int detect_cache_attributes(unsigned int cpu) |
887 | { | 884 | { |
888 | int retval; | 885 | int retval; |
889 | 886 | ||
@@ -1017,7 +1014,7 @@ static struct attribute *default_attrs[] = { | |||
1017 | }; | 1014 | }; |
1018 | 1015 | ||
1019 | #ifdef CONFIG_AMD_NB | 1016 | #ifdef CONFIG_AMD_NB |
1020 | static struct attribute ** __cpuinit amd_l3_attrs(void) | 1017 | static struct attribute **amd_l3_attrs(void) |
1021 | { | 1018 | { |
1022 | static struct attribute **attrs; | 1019 | static struct attribute **attrs; |
1023 | int n; | 1020 | int n; |
@@ -1093,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = { | |||
1093 | .sysfs_ops = &sysfs_ops, | 1090 | .sysfs_ops = &sysfs_ops, |
1094 | }; | 1091 | }; |
1095 | 1092 | ||
1096 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | 1093 | static void cpuid4_cache_sysfs_exit(unsigned int cpu) |
1097 | { | 1094 | { |
1098 | kfree(per_cpu(ici_cache_kobject, cpu)); | 1095 | kfree(per_cpu(ici_cache_kobject, cpu)); |
1099 | kfree(per_cpu(ici_index_kobject, cpu)); | 1096 | kfree(per_cpu(ici_index_kobject, cpu)); |
@@ -1102,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | |||
1102 | free_cache_attributes(cpu); | 1099 | free_cache_attributes(cpu); |
1103 | } | 1100 | } |
1104 | 1101 | ||
1105 | static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | 1102 | static int cpuid4_cache_sysfs_init(unsigned int cpu) |
1106 | { | 1103 | { |
1107 | int err; | 1104 | int err; |
1108 | 1105 | ||
@@ -1134,7 +1131,7 @@ err_out: | |||
1134 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); | 1131 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); |
1135 | 1132 | ||
1136 | /* Add/Remove cache interface for CPU device */ | 1133 | /* Add/Remove cache interface for CPU device */ |
1137 | static int __cpuinit cache_add_dev(struct device *dev) | 1134 | static int cache_add_dev(struct device *dev) |
1138 | { | 1135 | { |
1139 | unsigned int cpu = dev->id; | 1136 | unsigned int cpu = dev->id; |
1140 | unsigned long i, j; | 1137 | unsigned long i, j; |
@@ -1185,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev) | |||
1185 | return 0; | 1182 | return 0; |
1186 | } | 1183 | } |
1187 | 1184 | ||
1188 | static void __cpuinit cache_remove_dev(struct device *dev) | 1185 | static void cache_remove_dev(struct device *dev) |
1189 | { | 1186 | { |
1190 | unsigned int cpu = dev->id; | 1187 | unsigned int cpu = dev->id; |
1191 | unsigned long i; | 1188 | unsigned long i; |
@@ -1202,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev) | |||
1202 | cpuid4_cache_sysfs_exit(cpu); | 1199 | cpuid4_cache_sysfs_exit(cpu); |
1203 | } | 1200 | } |
1204 | 1201 | ||
1205 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | 1202 | static int cacheinfo_cpu_callback(struct notifier_block *nfb, |
1206 | unsigned long action, void *hcpu) | 1203 | unsigned long action, void *hcpu) |
1207 | { | 1204 | { |
1208 | unsigned int cpu = (unsigned long)hcpu; | 1205 | unsigned int cpu = (unsigned long)hcpu; |
1209 | struct device *dev; | 1206 | struct device *dev; |
@@ -1222,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
1222 | return NOTIFY_OK; | 1219 | return NOTIFY_OK; |
1223 | } | 1220 | } |
1224 | 1221 | ||
1225 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { | 1222 | static struct notifier_block cacheinfo_cpu_notifier = { |
1226 | .notifier_call = cacheinfo_cpu_callback, | 1223 | .notifier_call = cacheinfo_cpu_callback, |
1227 | }; | 1224 | }; |
1228 | 1225 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index ddc72f839332..5ac2d1fb28bc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
@@ -153,7 +153,7 @@ static void raise_mce(struct mce *m) | |||
153 | return; | 153 | return; |
154 | 154 | ||
155 | #ifdef CONFIG_X86_LOCAL_APIC | 155 | #ifdef CONFIG_X86_LOCAL_APIC |
156 | if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) { | 156 | if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) { |
157 | unsigned long start; | 157 | unsigned long start; |
158 | int cpu; | 158 | int cpu; |
159 | 159 | ||
@@ -167,7 +167,7 @@ static void raise_mce(struct mce *m) | |||
167 | cpumask_clear_cpu(cpu, mce_inject_cpumask); | 167 | cpumask_clear_cpu(cpu, mce_inject_cpumask); |
168 | } | 168 | } |
169 | if (!cpumask_empty(mce_inject_cpumask)) { | 169 | if (!cpumask_empty(mce_inject_cpumask)) { |
170 | if (m->inject_flags & MCJ_IRQ_BRAODCAST) { | 170 | if (m->inject_flags & MCJ_IRQ_BROADCAST) { |
171 | /* | 171 | /* |
172 | * don't wait because mce_irq_ipi is necessary | 172 | * don't wait because mce_irq_ipi is necessary |
173 | * to be sync with following raise_local | 173 | * to be sync with following raise_local |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index beb1f1689e52..c370e1c4468b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c | |||
@@ -110,22 +110,17 @@ static struct severity { | |||
110 | /* known AR MCACODs: */ | 110 | /* known AR MCACODs: */ |
111 | #ifdef CONFIG_MEMORY_FAILURE | 111 | #ifdef CONFIG_MEMORY_FAILURE |
112 | MCESEV( | 112 | MCESEV( |
113 | KEEP, "HT thread notices Action required: data load error", | 113 | KEEP, "Action required but unaffected thread is continuable", |
114 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), | 114 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR), |
115 | MCGMASK(MCG_STATUS_EIPV, 0) | 115 | MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV) |
116 | ), | 116 | ), |
117 | MCESEV( | 117 | MCESEV( |
118 | AR, "Action required: data load error", | 118 | AR, "Action required: data load error in a user process", |
119 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), | 119 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), |
120 | USER | 120 | USER |
121 | ), | 121 | ), |
122 | MCESEV( | 122 | MCESEV( |
123 | KEEP, "HT thread notices Action required: instruction fetch error", | 123 | AR, "Action required: instruction fetch error in a user process", |
124 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), | ||
125 | MCGMASK(MCG_STATUS_EIPV, 0) | ||
126 | ), | ||
127 | MCESEV( | ||
128 | AR, "Action required: instruction fetch error", | ||
129 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), | 124 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), |
130 | USER | 125 | USER |
131 | ), | 126 | ), |
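The rewritten entries above fold the old "HT thread notices" rules into one: a thread that sees RIPV set and EIPV clear in MCG_STATUS merely observed a broadcast error and may continue (KEEP), while the affected user thread gets the action-required (AR) treatment. For orientation, a simplified sketch of how such a table is scanned, using the field names the MCESEV() entries populate (per-rule context checks elided):

/* Sketch: rules are tried in order, the first full match wins. */
static int sketch_severity(struct mce *m)
{
	struct severity *s;

	for (s = severities;; s++) {
		if ((m->status & s->mask) != s->result)
			continue;
		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
			continue;
		/* per-rule conditions (SER, USER, ...) elided */
		return s->sev;
	}
}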
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 9239504b41cb..87a65c939bcd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -89,7 +89,10 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait); | |||
89 | static DEFINE_PER_CPU(struct mce, mces_seen); | 89 | static DEFINE_PER_CPU(struct mce, mces_seen); |
90 | static int cpu_missing; | 90 | static int cpu_missing; |
91 | 91 | ||
92 | /* MCA banks polled by the period polling timer for corrected events */ | 92 | /* |
93 | * MCA banks polled by the period polling timer for corrected events. | ||
94 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). | ||
95 | */ | ||
93 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | 96 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { |
94 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | 97 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL |
95 | }; | 98 | }; |
@@ -1360,7 +1363,7 @@ int mce_notify_irq(void) | |||
1360 | } | 1363 | } |
1361 | EXPORT_SYMBOL_GPL(mce_notify_irq); | 1364 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
1362 | 1365 | ||
1363 | static int __cpuinit __mcheck_cpu_mce_banks_init(void) | 1366 | static int __mcheck_cpu_mce_banks_init(void) |
1364 | { | 1367 | { |
1365 | int i; | 1368 | int i; |
1366 | u8 num_banks = mca_cfg.banks; | 1369 | u8 num_banks = mca_cfg.banks; |
@@ -1381,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void) | |||
1381 | /* | 1384 | /* |
1382 | * Initialize Machine Checks for a CPU. | 1385 | * Initialize Machine Checks for a CPU. |
1383 | */ | 1386 | */ |
1384 | static int __cpuinit __mcheck_cpu_cap_init(void) | 1387 | static int __mcheck_cpu_cap_init(void) |
1385 | { | 1388 | { |
1386 | unsigned b; | 1389 | unsigned b; |
1387 | u64 cap; | 1390 | u64 cap; |
@@ -1480,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) | |||
1480 | } | 1483 | } |
1481 | 1484 | ||
1482 | /* Add per CPU specific workarounds here */ | 1485 | /* Add per CPU specific workarounds here */ |
1483 | static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) | 1486 | static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) |
1484 | { | 1487 | { |
1485 | struct mca_config *cfg = &mca_cfg; | 1488 | struct mca_config *cfg = &mca_cfg; |
1486 | 1489 | ||
@@ -1590,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) | |||
1590 | return 0; | 1593 | return 0; |
1591 | } | 1594 | } |
1592 | 1595 | ||
1593 | static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) | 1596 | static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) |
1594 | { | 1597 | { |
1595 | if (c->x86 != 5) | 1598 | if (c->x86 != 5) |
1596 | return 0; | 1599 | return 0; |
@@ -1661,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = | |||
1661 | * Called for each booted CPU to set up machine checks. | 1664 | * Called for each booted CPU to set up machine checks. |
1662 | * Must be called with preempt off: | 1665 | * Must be called with preempt off: |
1663 | */ | 1666 | */ |
1664 | void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) | 1667 | void mcheck_cpu_init(struct cpuinfo_x86 *c) |
1665 | { | 1668 | { |
1666 | if (mca_cfg.disabled) | 1669 | if (mca_cfg.disabled) |
1667 | return; | 1670 | return; |
@@ -2079,7 +2082,6 @@ static struct bus_type mce_subsys = { | |||
2079 | 2082 | ||
2080 | DEFINE_PER_CPU(struct device *, mce_device); | 2083 | DEFINE_PER_CPU(struct device *, mce_device); |
2081 | 2084 | ||
2082 | __cpuinitdata | ||
2083 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | 2085 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
2084 | 2086 | ||
2085 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) | 2087 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) |
@@ -2225,7 +2227,7 @@ static void mce_device_release(struct device *dev) | |||
2225 | } | 2227 | } |
2226 | 2228 | ||
2227 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ | 2229 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ |
2228 | static __cpuinit int mce_device_create(unsigned int cpu) | 2230 | static int mce_device_create(unsigned int cpu) |
2229 | { | 2231 | { |
2230 | struct device *dev; | 2232 | struct device *dev; |
2231 | int err; | 2233 | int err; |
@@ -2271,7 +2273,7 @@ error: | |||
2271 | return err; | 2273 | return err; |
2272 | } | 2274 | } |
2273 | 2275 | ||
2274 | static __cpuinit void mce_device_remove(unsigned int cpu) | 2276 | static void mce_device_remove(unsigned int cpu) |
2275 | { | 2277 | { |
2276 | struct device *dev = per_cpu(mce_device, cpu); | 2278 | struct device *dev = per_cpu(mce_device, cpu); |
2277 | int i; | 2279 | int i; |
@@ -2291,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu) | |||
2291 | } | 2293 | } |
2292 | 2294 | ||
2293 | /* Make sure there are no machine checks on offlined CPUs. */ | 2295 | /* Make sure there are no machine checks on offlined CPUs. */ |
2294 | static void __cpuinit mce_disable_cpu(void *h) | 2296 | static void mce_disable_cpu(void *h) |
2295 | { | 2297 | { |
2296 | unsigned long action = *(unsigned long *)h; | 2298 | unsigned long action = *(unsigned long *)h; |
2297 | int i; | 2299 | int i; |
@@ -2309,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h) | |||
2309 | } | 2311 | } |
2310 | } | 2312 | } |
2311 | 2313 | ||
2312 | static void __cpuinit mce_reenable_cpu(void *h) | 2314 | static void mce_reenable_cpu(void *h) |
2313 | { | 2315 | { |
2314 | unsigned long action = *(unsigned long *)h; | 2316 | unsigned long action = *(unsigned long *)h; |
2315 | int i; | 2317 | int i; |
@@ -2328,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h) | |||
2328 | } | 2330 | } |
2329 | 2331 | ||
2330 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 2332 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
2331 | static int __cpuinit | 2333 | static int |
2332 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 2334 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
2333 | { | 2335 | { |
2334 | unsigned int cpu = (unsigned long)hcpu; | 2336 | unsigned int cpu = (unsigned long)hcpu; |
@@ -2364,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
2364 | return NOTIFY_OK; | 2366 | return NOTIFY_OK; |
2365 | } | 2367 | } |
2366 | 2368 | ||
2367 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { | 2369 | static struct notifier_block mce_cpu_notifier = { |
2368 | .notifier_call = mce_cpu_callback, | 2370 | .notifier_call = mce_cpu_callback, |
2369 | }; | 2371 | }; |
2370 | 2372 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 9cb52767999a..603df4f74640 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = { | |||
458 | .default_attrs = default_attrs, | 458 | .default_attrs = default_attrs, |
459 | }; | 459 | }; |
460 | 460 | ||
461 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | 461 | static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, |
462 | unsigned int bank, | 462 | unsigned int block, u32 address) |
463 | unsigned int block, | ||
464 | u32 address) | ||
465 | { | 463 | { |
466 | struct threshold_block *b = NULL; | 464 | struct threshold_block *b = NULL; |
467 | u32 low, high; | 465 | u32 low, high; |
@@ -543,7 +541,7 @@ out_free: | |||
543 | return err; | 541 | return err; |
544 | } | 542 | } |
545 | 543 | ||
546 | static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) | 544 | static int __threshold_add_blocks(struct threshold_bank *b) |
547 | { | 545 | { |
548 | struct list_head *head = &b->blocks->miscj; | 546 | struct list_head *head = &b->blocks->miscj; |
549 | struct threshold_block *pos = NULL; | 547 | struct threshold_block *pos = NULL; |
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) | |||
567 | return err; | 565 | return err; |
568 | } | 566 | } |
569 | 567 | ||
570 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | 568 | static int threshold_create_bank(unsigned int cpu, unsigned int bank) |
571 | { | 569 | { |
572 | struct device *dev = per_cpu(mce_device, cpu); | 570 | struct device *dev = per_cpu(mce_device, cpu); |
573 | struct amd_northbridge *nb = NULL; | 571 | struct amd_northbridge *nb = NULL; |
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
632 | } | 630 | } |
633 | 631 | ||
634 | /* create dir/files for all valid threshold banks */ | 632 | /* create dir/files for all valid threshold banks */ |
635 | static __cpuinit int threshold_create_device(unsigned int cpu) | 633 | static int threshold_create_device(unsigned int cpu) |
636 | { | 634 | { |
637 | unsigned int bank; | 635 | unsigned int bank; |
638 | struct threshold_bank **bp; | 636 | struct threshold_bank **bp; |
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu) | |||
736 | } | 734 | } |
737 | 735 | ||
738 | /* get notified when a cpu comes on/off */ | 736 | /* get notified when a cpu comes on/off */ |
739 | static void __cpuinit | 737 | static void |
740 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) | 738 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) |
741 | { | 739 | { |
742 | switch (action) { | 740 | switch (action) { |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index ae1697c2afe3..d56405309dc1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -24,6 +24,18 @@ | |||
24 | * Also supports reliable discovery of shared banks. | 24 | * Also supports reliable discovery of shared banks. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | /* | ||
28 | * CMCI can be delivered to multiple cpus that share a machine check bank | ||
29 | * so we need to designate a single cpu to process errors logged in each bank | ||
30 | * in the interrupt handler (otherwise we would have many races and potential | ||
31 | * double reporting of the same error). | ||
32 | * Note that this can change when a cpu is offlined or brought online since | ||
33 | * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear() | ||
34 | * disables CMCI on all banks owned by the cpu and clears this bitfield. At | ||
35 | * this point, cmci_rediscover() kicks in and a different cpu may end up | ||
36 | * taking ownership of some of the shared MCA banks that were previously | ||
37 | * owned by the offlined cpu. | ||
38 | */ | ||
27 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | 39 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); |
28 | 40 | ||
29 | /* | 41 | /* |
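The comment block added to mce_intel.c above explains the ownership rule for shared CMCI banks. A minimal user-space sketch of the idea follows, with invented names (the kernel's actual bookkeeping is the per-CPU mce_banks_owned bitmap, cleared by cmci_clear() and re-claimed via cmci_rediscover() on hotplug):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BANKS 32

    /* Per-CPU view: which machine-check banks this CPU has claimed. */
    struct cpu_state { uint32_t banks_owned; };

    /* Interrupt path: each CPU logs only the banks it owns, so an error
     * in a bank shared by several CPUs is reported exactly once. */
    static void cmci_interrupt(struct cpu_state *me, uint32_t banks_signalled)
    {
        uint32_t mine = banks_signalled & me->banks_owned;
        int i;

        for (i = 0; i < MAX_BANKS; i++)
            if (mine & (1u << i))
                printf("log error in bank %d\n", i);
    }

    /* Offline path: the departing CPU drops all claims; a surviving CPU
     * can then rediscover and take over the shared banks. */
    static void clear_ownership(struct cpu_state *me)
    {
        me->banks_owned = 0;
    }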
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 47a1870279aa..3eec7de76efb 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/idle.h> | 29 | #include <asm/idle.h> |
30 | #include <asm/mce.h> | 30 | #include <asm/mce.h> |
31 | #include <asm/msr.h> | 31 | #include <asm/msr.h> |
32 | #include <asm/trace/irq_vectors.h> | ||
32 | 33 | ||
33 | /* How long to wait between reporting thermal events */ | 34 | /* How long to wait between reporting thermal events */ |
34 | #define CHECK_INTERVAL (300 * HZ) | 35 | #define CHECK_INTERVAL (300 * HZ) |
@@ -54,12 +55,24 @@ struct thermal_state { | |||
54 | struct _thermal_state package_power_limit; | 55 | struct _thermal_state package_power_limit; |
55 | struct _thermal_state core_thresh0; | 56 | struct _thermal_state core_thresh0; |
56 | struct _thermal_state core_thresh1; | 57 | struct _thermal_state core_thresh1; |
58 | struct _thermal_state pkg_thresh0; | ||
59 | struct _thermal_state pkg_thresh1; | ||
57 | }; | 60 | }; |
58 | 61 | ||
59 | /* Callback to handle core threshold interrupts */ | 62 | /* Callback to handle core threshold interrupts */ |
60 | int (*platform_thermal_notify)(__u64 msr_val); | 63 | int (*platform_thermal_notify)(__u64 msr_val); |
61 | EXPORT_SYMBOL(platform_thermal_notify); | 64 | EXPORT_SYMBOL(platform_thermal_notify); |
62 | 65 | ||
66 | /* Callback to handle package threshold interrupts */ | ||
67 | int (*platform_thermal_package_notify)(__u64 msr_val); | ||
68 | EXPORT_SYMBOL_GPL(platform_thermal_package_notify); | ||
69 | |||
70 | /* Callback to query rate-control support; returns true if the | ||
71 | * package notify callback implements its own rate control */ | ||
72 | bool (*platform_thermal_package_rate_control)(void); | ||
73 | EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control); | ||
74 | |||
75 | |||
63 | static DEFINE_PER_CPU(struct thermal_state, thermal_state); | 76 | static DEFINE_PER_CPU(struct thermal_state, thermal_state); |
64 | 77 | ||
65 | static atomic_t therm_throt_en = ATOMIC_INIT(0); | 78 | static atomic_t therm_throt_en = ATOMIC_INIT(0); |
@@ -181,11 +194,6 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
181 | this_cpu, | 194 | this_cpu, |
182 | level == CORE_LEVEL ? "Core" : "Package", | 195 | level == CORE_LEVEL ? "Core" : "Package", |
183 | state->count); | 196 | state->count); |
184 | else | ||
185 | printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n", | ||
186 | this_cpu, | ||
187 | level == CORE_LEVEL ? "Core" : "Package", | ||
188 | state->count); | ||
189 | return 1; | 197 | return 1; |
190 | } | 198 | } |
191 | if (old_event) { | 199 | if (old_event) { |
@@ -193,36 +201,46 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
193 | printk(KERN_INFO "CPU%d: %s temperature/speed normal\n", | 201 | printk(KERN_INFO "CPU%d: %s temperature/speed normal\n", |
194 | this_cpu, | 202 | this_cpu, |
195 | level == CORE_LEVEL ? "Core" : "Package"); | 203 | level == CORE_LEVEL ? "Core" : "Package"); |
196 | else | ||
197 | printk(KERN_INFO "CPU%d: %s power limit normal\n", | ||
198 | this_cpu, | ||
199 | level == CORE_LEVEL ? "Core" : "Package"); | ||
200 | return 1; | 204 | return 1; |
201 | } | 205 | } |
202 | 206 | ||
203 | return 0; | 207 | return 0; |
204 | } | 208 | } |
205 | 209 | ||
206 | static int thresh_event_valid(int event) | 210 | static int thresh_event_valid(int level, int event) |
207 | { | 211 | { |
208 | struct _thermal_state *state; | 212 | struct _thermal_state *state; |
209 | unsigned int this_cpu = smp_processor_id(); | 213 | unsigned int this_cpu = smp_processor_id(); |
210 | struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); | 214 | struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); |
211 | u64 now = get_jiffies_64(); | 215 | u64 now = get_jiffies_64(); |
212 | 216 | ||
213 | state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1; | 217 | if (level == PACKAGE_LEVEL) |
218 | state = (event == 0) ? &pstate->pkg_thresh0 : | ||
219 | &pstate->pkg_thresh1; | ||
220 | else | ||
221 | state = (event == 0) ? &pstate->core_thresh0 : | ||
222 | &pstate->core_thresh1; | ||
214 | 223 | ||
215 | if (time_before64(now, state->next_check)) | 224 | if (time_before64(now, state->next_check)) |
216 | return 0; | 225 | return 0; |
217 | 226 | ||
218 | state->next_check = now + CHECK_INTERVAL; | 227 | state->next_check = now + CHECK_INTERVAL; |
228 | |||
229 | return 1; | ||
230 | } | ||
231 | |||
232 | static bool int_pln_enable; | ||
233 | static int __init int_pln_enable_setup(char *s) | ||
234 | { | ||
235 | int_pln_enable = true; | ||
236 | |||
219 | return 1; | 237 | return 1; |
220 | } | 238 | } |
239 | __setup("int_pln_enable", int_pln_enable_setup); | ||
221 | 240 | ||
222 | #ifdef CONFIG_SYSFS | 241 | #ifdef CONFIG_SYSFS |
223 | /* Add/Remove thermal_throttle interface for CPU device: */ | 242 | /* Add/Remove thermal_throttle interface for CPU device: */ |
224 | static __cpuinit int thermal_throttle_add_dev(struct device *dev, | 243 | static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu) |
225 | unsigned int cpu) | ||
226 | { | 244 | { |
227 | int err; | 245 | int err; |
228 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 246 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
@@ -231,7 +249,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev, | |||
231 | if (err) | 249 | if (err) |
232 | return err; | 250 | return err; |
233 | 251 | ||
234 | if (cpu_has(c, X86_FEATURE_PLN)) | 252 | if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) |
235 | err = sysfs_add_file_to_group(&dev->kobj, | 253 | err = sysfs_add_file_to_group(&dev->kobj, |
236 | &dev_attr_core_power_limit_count.attr, | 254 | &dev_attr_core_power_limit_count.attr, |
237 | thermal_attr_group.name); | 255 | thermal_attr_group.name); |
@@ -239,7 +257,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev, | |||
239 | err = sysfs_add_file_to_group(&dev->kobj, | 257 | err = sysfs_add_file_to_group(&dev->kobj, |
240 | &dev_attr_package_throttle_count.attr, | 258 | &dev_attr_package_throttle_count.attr, |
241 | thermal_attr_group.name); | 259 | thermal_attr_group.name); |
242 | if (cpu_has(c, X86_FEATURE_PLN)) | 260 | if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) |
243 | err = sysfs_add_file_to_group(&dev->kobj, | 261 | err = sysfs_add_file_to_group(&dev->kobj, |
244 | &dev_attr_package_power_limit_count.attr, | 262 | &dev_attr_package_power_limit_count.attr, |
245 | thermal_attr_group.name); | 263 | thermal_attr_group.name); |
@@ -248,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev, | |||
248 | return err; | 266 | return err; |
249 | } | 267 | } |
250 | 268 | ||
251 | static __cpuinit void thermal_throttle_remove_dev(struct device *dev) | 269 | static void thermal_throttle_remove_dev(struct device *dev) |
252 | { | 270 | { |
253 | sysfs_remove_group(&dev->kobj, &thermal_attr_group); | 271 | sysfs_remove_group(&dev->kobj, &thermal_attr_group); |
254 | } | 272 | } |
@@ -257,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev) | |||
257 | static DEFINE_MUTEX(therm_cpu_lock); | 275 | static DEFINE_MUTEX(therm_cpu_lock); |
258 | 276 | ||
259 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 277 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
260 | static __cpuinit int | 278 | static int |
261 | thermal_throttle_cpu_callback(struct notifier_block *nfb, | 279 | thermal_throttle_cpu_callback(struct notifier_block *nfb, |
262 | unsigned long action, | 280 | unsigned long action, |
263 | void *hcpu) | 281 | void *hcpu) |
@@ -288,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
288 | return notifier_from_errno(err); | 306 | return notifier_from_errno(err); |
289 | } | 307 | } |
290 | 308 | ||
291 | static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = | 309 | static struct notifier_block thermal_throttle_cpu_notifier = |
292 | { | 310 | { |
293 | .notifier_call = thermal_throttle_cpu_callback, | 311 | .notifier_call = thermal_throttle_cpu_callback, |
294 | }; | 312 | }; |
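For context on the thresh_event_valid() change above: the function is a simple per-threshold rate limiter, letting one event through per CHECK_INTERVAL and dropping the rest, and it now keeps separate state per level (core vs. package). A minimal sketch of that throttle, assuming a monotonic millisecond clock (names invented):

    #include <stdbool.h>
    #include <stdint.h>

    #define CHECK_INTERVAL_MS (5 * 60 * 1000)   /* mirrors 300 * HZ */

    struct rate_state {
        uint64_t next_check;   /* earliest time the next event may pass */
    };

    /* Return true if the event should be reported, false if throttled. */
    static bool event_valid(struct rate_state *state, uint64_t now_ms)
    {
        if (now_ms < state->next_check)
            return false;                        /* inside quiet period */
        state->next_check = now_ms + CHECK_INTERVAL_MS;
        return true;
    }

The int_pln_enable boot parameter added in the same hunk simply flips a bool from __setup(), which then gates every power-limit notification path below.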
@@ -321,6 +339,39 @@ device_initcall(thermal_throttle_init_device); | |||
321 | 339 | ||
322 | #endif /* CONFIG_SYSFS */ | 340 | #endif /* CONFIG_SYSFS */ |
323 | 341 | ||
342 | static void notify_package_thresholds(__u64 msr_val) | ||
343 | { | ||
344 | bool notify_thres_0 = false; | ||
345 | bool notify_thres_1 = false; | ||
346 | |||
347 | if (!platform_thermal_package_notify) | ||
348 | return; | ||
349 | |||
350 | /* lower threshold check */ | ||
351 | if (msr_val & THERM_LOG_THRESHOLD0) | ||
352 | notify_thres_0 = true; | ||
353 | /* higher threshold check */ | ||
354 | if (msr_val & THERM_LOG_THRESHOLD1) | ||
355 | notify_thres_1 = true; | ||
356 | |||
357 | if (!notify_thres_0 && !notify_thres_1) | ||
358 | return; | ||
359 | |||
360 | if (platform_thermal_package_rate_control && | ||
361 | platform_thermal_package_rate_control()) { | ||
362 | /* Rate control is implemented in callback */ | ||
363 | platform_thermal_package_notify(msr_val); | ||
364 | return; | ||
365 | } | ||
366 | |||
367 | /* lower threshold reached */ | ||
368 | if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0)) | ||
369 | platform_thermal_package_notify(msr_val); | ||
370 | /* higher threshold reached */ | ||
371 | if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1)) | ||
372 | platform_thermal_package_notify(msr_val); | ||
373 | } | ||
374 | |||
324 | static void notify_thresholds(__u64 msr_val) | 375 | static void notify_thresholds(__u64 msr_val) |
325 | { | 376 | { |
326 | /* check whether the interrupt handler is defined; | 377 | /* check whether the interrupt handler is defined; |
@@ -330,10 +381,12 @@ static void notify_thresholds(__u64 msr_val) | |||
330 | return; | 381 | return; |
331 | 382 | ||
332 | /* lower threshold reached */ | 383 | /* lower threshold reached */ |
333 | if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0)) | 384 | if ((msr_val & THERM_LOG_THRESHOLD0) && |
385 | thresh_event_valid(CORE_LEVEL, 0)) | ||
334 | platform_thermal_notify(msr_val); | 386 | platform_thermal_notify(msr_val); |
335 | /* higher threshold reached */ | 387 | /* higher threshold reached */ |
336 | if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1)) | 388 | if ((msr_val & THERM_LOG_THRESHOLD1) && |
389 | thresh_event_valid(CORE_LEVEL, 1)) | ||
337 | platform_thermal_notify(msr_val); | 390 | platform_thermal_notify(msr_val); |
338 | } | 391 | } |
339 | 392 | ||
@@ -352,17 +405,19 @@ static void intel_thermal_interrupt(void) | |||
352 | CORE_LEVEL) != 0) | 405 | CORE_LEVEL) != 0) |
353 | mce_log_therm_throt_event(msr_val); | 406 | mce_log_therm_throt_event(msr_val); |
354 | 407 | ||
355 | if (this_cpu_has(X86_FEATURE_PLN)) | 408 | if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable) |
356 | therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, | 409 | therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, |
357 | POWER_LIMIT_EVENT, | 410 | POWER_LIMIT_EVENT, |
358 | CORE_LEVEL); | 411 | CORE_LEVEL); |
359 | 412 | ||
360 | if (this_cpu_has(X86_FEATURE_PTS)) { | 413 | if (this_cpu_has(X86_FEATURE_PTS)) { |
361 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); | 414 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); |
415 | /* check violations of package thermal thresholds */ | ||
416 | notify_package_thresholds(msr_val); | ||
362 | therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, | 417 | therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, |
363 | THERMAL_THROTTLING_EVENT, | 418 | THERMAL_THROTTLING_EVENT, |
364 | PACKAGE_LEVEL); | 419 | PACKAGE_LEVEL); |
365 | if (this_cpu_has(X86_FEATURE_PLN)) | 420 | if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable) |
366 | therm_throt_process(msr_val & | 421 | therm_throt_process(msr_val & |
367 | PACKAGE_THERM_STATUS_POWER_LIMIT, | 422 | PACKAGE_THERM_STATUS_POWER_LIMIT, |
368 | POWER_LIMIT_EVENT, | 423 | POWER_LIMIT_EVENT, |
@@ -378,15 +433,26 @@ static void unexpected_thermal_interrupt(void) | |||
378 | 433 | ||
379 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; | 434 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; |
380 | 435 | ||
381 | asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) | 436 | static inline void __smp_thermal_interrupt(void) |
382 | { | 437 | { |
383 | irq_enter(); | ||
384 | exit_idle(); | ||
385 | inc_irq_stat(irq_thermal_count); | 438 | inc_irq_stat(irq_thermal_count); |
386 | smp_thermal_vector(); | 439 | smp_thermal_vector(); |
387 | irq_exit(); | 440 | } |
388 | /* Ack only at the end to avoid potential reentry */ | 441 | |
389 | ack_APIC_irq(); | 442 | asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) |
443 | { | ||
444 | entering_irq(); | ||
445 | __smp_thermal_interrupt(); | ||
446 | exiting_ack_irq(); | ||
447 | } | ||
448 | |||
449 | asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs) | ||
450 | { | ||
451 | entering_irq(); | ||
452 | trace_thermal_apic_entry(THERMAL_APIC_VECTOR); | ||
453 | __smp_thermal_interrupt(); | ||
454 | trace_thermal_apic_exit(THERMAL_APIC_VECTOR); | ||
455 | exiting_ack_irq(); | ||
390 | } | 456 | } |
391 | 457 | ||
392 | /* Thermal monitoring depends on APIC, ACPI and clock modulation */ | 458 | /* Thermal monitoring depends on APIC, ACPI and clock modulation */ |
@@ -470,9 +536,13 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
470 | apic_write(APIC_LVTTHMR, h); | 536 | apic_write(APIC_LVTTHMR, h); |
471 | 537 | ||
472 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); | 538 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); |
473 | if (cpu_has(c, X86_FEATURE_PLN)) | 539 | if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable) |
540 | wrmsr(MSR_IA32_THERM_INTERRUPT, | ||
541 | (l | (THERM_INT_LOW_ENABLE | ||
542 | | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h); | ||
543 | else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) | ||
474 | wrmsr(MSR_IA32_THERM_INTERRUPT, | 544 | wrmsr(MSR_IA32_THERM_INTERRUPT, |
475 | l | (THERM_INT_LOW_ENABLE | 545 | l | (THERM_INT_LOW_ENABLE |
476 | | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h); | 546 | | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h); |
477 | else | 547 | else |
478 | wrmsr(MSR_IA32_THERM_INTERRUPT, | 548 | wrmsr(MSR_IA32_THERM_INTERRUPT, |
@@ -480,9 +550,14 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
480 | 550 | ||
481 | if (cpu_has(c, X86_FEATURE_PTS)) { | 551 | if (cpu_has(c, X86_FEATURE_PTS)) { |
482 | rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); | 552 | rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); |
483 | if (cpu_has(c, X86_FEATURE_PLN)) | 553 | if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable) |
484 | wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, | 554 | wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, |
485 | l | (PACKAGE_THERM_INT_LOW_ENABLE | 555 | (l | (PACKAGE_THERM_INT_LOW_ENABLE |
556 | | PACKAGE_THERM_INT_HIGH_ENABLE)) | ||
557 | & ~PACKAGE_THERM_INT_PLN_ENABLE, h); | ||
558 | else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) | ||
559 | wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, | ||
560 | l | (PACKAGE_THERM_INT_LOW_ENABLE | ||
486 | | PACKAGE_THERM_INT_HIGH_ENABLE | 561 | | PACKAGE_THERM_INT_HIGH_ENABLE |
487 | | PACKAGE_THERM_INT_PLN_ENABLE), h); | 562 | | PACKAGE_THERM_INT_PLN_ENABLE), h); |
488 | else | 563 | else |
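notify_package_thresholds() above decodes the two threshold log bits and then either defers rate limiting to the platform callback or applies the generic throttle. A compressed sketch of that dispatch, with stub callbacks standing in for the exported hooks (all names invented):

    #include <stdbool.h>
    #include <stdint.h>

    #define LOG_THRESHOLD0 (1ULL << 0)   /* stand-ins for the MSR log bits */
    #define LOG_THRESHOLD1 (1ULL << 1)

    static int  (*pkg_notify)(uint64_t msr_val);   /* platform callback  */
    static bool (*pkg_rate_control)(void);         /* optional rate hook */

    static bool event_valid(int event)             /* generic throttle   */
    {
        (void)event;
        return true;                               /* placeholder */
    }

    static void notify_pkg(uint64_t msr_val)
    {
        bool t0 = msr_val & LOG_THRESHOLD0;
        bool t1 = msr_val & LOG_THRESHOLD1;

        if (!pkg_notify || (!t0 && !t1))
            return;

        /* Platform rate-limits itself: hand the event straight over. */
        if (pkg_rate_control && pkg_rate_control()) {
            pkg_notify(msr_val);
            return;
        }

        /* Otherwise throttle each threshold independently. */
        if (t0 && event_valid(0))
            pkg_notify(msr_val);
        if (t1 && event_valid(1))
            pkg_notify(msr_val);
    }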
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index aa578cadb940..fe6b1c86645b 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <asm/apic.h> | 8 | #include <asm/apic.h> |
9 | #include <asm/idle.h> | 9 | #include <asm/idle.h> |
10 | #include <asm/mce.h> | 10 | #include <asm/mce.h> |
11 | #include <asm/trace/irq_vectors.h> | ||
11 | 12 | ||
12 | static void default_threshold_interrupt(void) | 13 | static void default_threshold_interrupt(void) |
13 | { | 14 | { |
@@ -17,13 +18,24 @@ static void default_threshold_interrupt(void) | |||
17 | 18 | ||
18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | 19 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; |
19 | 20 | ||
20 | asmlinkage void smp_threshold_interrupt(void) | 21 | static inline void __smp_threshold_interrupt(void) |
21 | { | 22 | { |
22 | irq_enter(); | ||
23 | exit_idle(); | ||
24 | inc_irq_stat(irq_threshold_count); | 23 | inc_irq_stat(irq_threshold_count); |
25 | mce_threshold_vector(); | 24 | mce_threshold_vector(); |
26 | irq_exit(); | 25 | } |
27 | /* Ack only at the end to avoid potential reentry */ | 26 | |
28 | ack_APIC_irq(); | 27 | asmlinkage void smp_threshold_interrupt(void) |
28 | { | ||
29 | entering_irq(); | ||
30 | __smp_threshold_interrupt(); | ||
31 | exiting_ack_irq(); | ||
32 | } | ||
33 | |||
34 | asmlinkage void smp_trace_threshold_interrupt(void) | ||
35 | { | ||
36 | entering_irq(); | ||
37 | trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); | ||
38 | __smp_threshold_interrupt(); | ||
39 | trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR); | ||
40 | exiting_ack_irq(); | ||
29 | } | 41 | } |
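Both interrupt handlers in this commit are split the same way: the body moves into a static inline helper, and two thin entry points wrap it, one plain and one bracketed by tracepoints, so the untraced path stays as cheap as before. The pattern in isolation (plain C; puts() stands in for the entering_irq()/trace hooks):

    #include <stdio.h>

    /* Shared body: the work both entry points perform. */
    static inline void __handle_event(void)
    {
        puts("count and dispatch the event");
    }

    /* Plain entry point, analogous to smp_threshold_interrupt(). */
    void handle_event(void)
    {
        /* entering_irq() would run here */
        __handle_event();
        /* exiting_ack_irq() would run here */
    }

    /* Traced entry point, analogous to smp_trace_threshold_interrupt(). */
    void handle_event_traced(void)
    {
        puts("trace: vector entry");
        __handle_event();
        puts("trace: vector exit");
    }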
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index 68a3343e5798..9e451b0876b5 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c | |||
@@ -167,7 +167,7 @@ static void post_set(void) | |||
167 | setCx86(CX86_CCR3, ccr3); | 167 | setCx86(CX86_CCR3, ccr3); |
168 | 168 | ||
169 | /* Enable caches */ | 169 | /* Enable caches */ |
170 | write_cr0(read_cr0() & 0xbfffffff); | 170 | write_cr0(read_cr0() & ~X86_CR0_CD); |
171 | 171 | ||
172 | /* Restore value of CR4 */ | 172 | /* Restore value of CR4 */ |
173 | if (cpu_has_pge) | 173 | if (cpu_has_pge) |
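The cyrix.c hunk replaces the magic constant 0xbfffffff with the named CR0 bit; bit 30 of CR0 is the cache-disable (CD) flag, so the two masks are numerically identical and only the readability changes. A quick self-check:

    #include <assert.h>
    #include <stdint.h>

    #define X86_CR0_CD (1UL << 30)   /* cache disable, bit 30 of CR0 */

    int main(void)
    {
        /* Clearing CD is the same as AND-ing with 0xbfffffff. */
        assert((uint32_t)~X86_CR0_CD == 0xbfffffffu);
        return 0;
    }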
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fa72a39e5d46..d4cdfa67509e 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -510,8 +510,9 @@ generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
510 | static void generic_get_mtrr(unsigned int reg, unsigned long *base, | 510 | static void generic_get_mtrr(unsigned int reg, unsigned long *base, |
511 | unsigned long *size, mtrr_type *type) | 511 | unsigned long *size, mtrr_type *type) |
512 | { | 512 | { |
513 | unsigned int mask_lo, mask_hi, base_lo, base_hi; | 513 | u32 mask_lo, mask_hi, base_lo, base_hi; |
514 | unsigned int tmp, hi; | 514 | unsigned int hi; |
515 | u64 tmp, mask; | ||
515 | 516 | ||
516 | /* | 517 | /* |
517 | * get_mtrr doesn't need to update mtrr_state, also it could be called | 518 | * get_mtrr doesn't need to update mtrr_state, also it could be called |
@@ -532,18 +533,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
532 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); | 533 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); |
533 | 534 | ||
534 | /* Work out the shifted address mask: */ | 535 | /* Work out the shifted address mask: */ |
535 | tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; | 536 | tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; |
536 | mask_lo = size_or_mask | tmp; | 537 | mask = size_or_mask | tmp; |
537 | 538 | ||
538 | /* Expand tmp with high bits to all 1s: */ | 539 | /* Expand tmp with high bits to all 1s: */ |
539 | hi = fls(tmp); | 540 | hi = fls64(tmp); |
540 | if (hi > 0) { | 541 | if (hi > 0) { |
541 | tmp |= ~((1<<(hi - 1)) - 1); | 542 | tmp |= ~((1ULL<<(hi - 1)) - 1); |
542 | 543 | ||
543 | if (tmp != mask_lo) { | 544 | if (tmp != mask) { |
544 | printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); | 545 | printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); |
545 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); | 546 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); |
546 | mask_lo = tmp; | 547 | mask = tmp; |
547 | } | 548 | } |
548 | } | 549 | } |
549 | 550 | ||
@@ -551,8 +552,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
551 | * This works correctly if size is a power of two, i.e. a | 552 | * This works correctly if size is a power of two, i.e. a |
552 | * contiguous range: | 553 | * contiguous range: |
553 | */ | 554 | */ |
554 | *size = -mask_lo; | 555 | *size = -mask; |
555 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; | 556 | *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; |
556 | *type = base_lo & 0xff; | 557 | *type = base_lo & 0xff; |
557 | 558 | ||
558 | out_put_cpu: | 559 | out_put_cpu: |
@@ -701,7 +702,7 @@ static void post_set(void) __releases(set_atomicity_lock) | |||
701 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 702 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
702 | 703 | ||
703 | /* Enable caches */ | 704 | /* Enable caches */ |
704 | write_cr0(read_cr0() & 0xbfffffff); | 705 | write_cr0(read_cr0() & ~X86_CR0_CD); |
705 | 706 | ||
706 | /* Restore value of CR4 */ | 707 | /* Restore value of CR4 */ |
707 | if (cpu_has_pge) | 708 | if (cpu_has_pge) |
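The generic_get_mtrr() hunk matters on CPUs with more than 44 physical address bits: with PAGE_SHIFT = 12, mask_hi << 20 is evaluated in 32-bit arithmetic and silently drops any mask bits at or above bit 44. A small demonstration of the truncation the (u64) cast avoids (illustrative values for a 46-bit machine):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* mask_hi carries physical-address bits 32..45. */
        uint32_t mask_hi = 0x3fff, mask_lo = 0xfff00000;

        /* 32-bit arithmetic: the top bits of the shift are lost. */
        uint32_t bad  = (mask_hi << 20) | (mask_lo >> 12);
        /* Widening first keeps the full shifted value. */
        uint64_t good = ((uint64_t)mask_hi << 20) | (mask_lo >> 12);

        printf("truncated: %#x\n", bad);    /* 0xffffff00  */
        printf("correct:   %#llx\n",
               (unsigned long long)good);   /* 0x3ffffff00 */
        return 0;
    }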
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 726bf963c227..f961de9964c7 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -51,9 +51,13 @@ | |||
51 | #include <asm/e820.h> | 51 | #include <asm/e820.h> |
52 | #include <asm/mtrr.h> | 52 | #include <asm/mtrr.h> |
53 | #include <asm/msr.h> | 53 | #include <asm/msr.h> |
54 | #include <asm/pat.h> | ||
54 | 55 | ||
55 | #include "mtrr.h" | 56 | #include "mtrr.h" |
56 | 57 | ||
58 | /* arch_phys_wc_add returns an MTRR register index plus this offset. */ | ||
59 | #define MTRR_TO_PHYS_WC_OFFSET 1000 | ||
60 | |||
57 | u32 num_var_ranges; | 61 | u32 num_var_ranges; |
58 | 62 | ||
59 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | 63 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
@@ -305,7 +309,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
305 | return -EINVAL; | 309 | return -EINVAL; |
306 | } | 310 | } |
307 | 311 | ||
308 | if (base & size_or_mask || size & size_or_mask) { | 312 | if ((base | (base + size - 1)) >> |
313 | (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { | ||
309 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); | 314 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); |
310 | return -EINVAL; | 315 | return -EINVAL; |
311 | } | 316 | } |
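The new validity test in mtrr_add_page() above works in page-frame units: the last frame of the range is base + size - 1, and OR-ing it with base exposes any bit at or above x86_phys_bits - PAGE_SHIFT in either endpoint. The arithmetic in isolation (user-space sketch, invented names):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* True if the page-frame range [base, base + size) fits within
     * phys_bits of physical address space. */
    static bool range_fits(unsigned long base, unsigned long size,
                           int phys_bits)
    {
        return ((base | (base + size - 1)) >> (phys_bits - PAGE_SHIFT)) == 0;
    }

    int main(void)
    {
        /* 36-bit machine: frame numbers must stay below 1 << 24. */
        printf("%d\n", range_fits(0xf00000, 0x100000, 36));  /* 1: fits   */
        printf("%d\n", range_fits(0xffffff, 0x2, 36));       /* 0: spills */
        return 0;
    }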
@@ -524,6 +529,73 @@ int mtrr_del(int reg, unsigned long base, unsigned long size) | |||
524 | } | 529 | } |
525 | EXPORT_SYMBOL(mtrr_del); | 530 | EXPORT_SYMBOL(mtrr_del); |
526 | 531 | ||
532 | /** | ||
533 | * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable | ||
534 | * @base: Physical base address | ||
535 | * @size: Size of region | ||
536 | * | ||
537 | * If PAT is available, this does nothing. If PAT is unavailable, it | ||
538 | * attempts to add a WC MTRR covering size bytes starting at base and | ||
539 | * logs an error if this fails. | ||
540 | * | ||
541 | * Drivers must store the return value to pass to arch_phys_wc_del, | ||
542 | * but drivers should not try to interpret that return value. | ||
543 | */ | ||
544 | int arch_phys_wc_add(unsigned long base, unsigned long size) | ||
545 | { | ||
546 | int ret; | ||
547 | |||
548 | if (pat_enabled) | ||
549 | return 0; /* Success! (We don't need to do anything.) */ | ||
550 | |||
551 | ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true); | ||
552 | if (ret < 0) { | ||
553 | pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.", | ||
554 | (void *)base, (void *)(base + size - 1)); | ||
555 | return ret; | ||
556 | } | ||
557 | return ret + MTRR_TO_PHYS_WC_OFFSET; | ||
558 | } | ||
559 | EXPORT_SYMBOL(arch_phys_wc_add); | ||
560 | |||
561 | /* | ||
562 | * arch_phys_wc_del - undoes arch_phys_wc_add | ||
563 | * @handle: Return value from arch_phys_wc_add | ||
564 | * | ||
565 | * This cleans up after arch_phys_wc_add. | ||
566 | * | ||
567 | * The API guarantees that arch_phys_wc_del(error code) and | ||
568 | * arch_phys_wc_del(0) do nothing. | ||
569 | */ | ||
570 | void arch_phys_wc_del(int handle) | ||
571 | { | ||
572 | if (handle >= 1) { | ||
573 | WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET); | ||
574 | mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0); | ||
575 | } | ||
576 | } | ||
577 | EXPORT_SYMBOL(arch_phys_wc_del); | ||
578 | |||
579 | /* | ||
580 | * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value | ||
581 | * @handle: Return value from arch_phys_wc_add | ||
582 | * | ||
583 | * This will turn the return value from arch_phys_wc_add into an mtrr | ||
584 | * index suitable for debugging. | ||
585 | * | ||
586 | * Note: There is no legitimate use for this function, except possibly | ||
587 | * in a printk line. Alas, there is an illegitimate use in some ancient | ||
588 | * drm ioctls. | ||
589 | */ | ||
590 | int phys_wc_to_mtrr_index(int handle) | ||
591 | { | ||
592 | if (handle < MTRR_TO_PHYS_WC_OFFSET) | ||
593 | return -1; | ||
594 | else | ||
595 | return handle - MTRR_TO_PHYS_WC_OFFSET; | ||
596 | } | ||
597 | EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index); | ||
598 | |||
527 | /* | 599 | /* |
528 | * HACK ALERT! | 600 | * HACK ALERT! |
529 | * These should be called implicitly, but we can't yet until all the initcall | 601 | * These should be called implicitly, but we can't yet until all the initcall |
@@ -583,6 +655,7 @@ static struct syscore_ops mtrr_syscore_ops = { | |||
583 | 655 | ||
584 | int __initdata changed_by_mtrr_cleanup; | 656 | int __initdata changed_by_mtrr_cleanup; |
585 | 657 | ||
658 | #define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1)) | ||
586 | /** | 659 | /** |
587 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 660 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
588 | * | 661 | * |
@@ -600,7 +673,7 @@ void __init mtrr_bp_init(void) | |||
600 | 673 | ||
601 | if (cpu_has_mtrr) { | 674 | if (cpu_has_mtrr) { |
602 | mtrr_if = &generic_mtrr_ops; | 675 | mtrr_if = &generic_mtrr_ops; |
603 | size_or_mask = 0xff000000; /* 36 bits */ | 676 | size_or_mask = SIZE_OR_MASK_BITS(36); |
604 | size_and_mask = 0x00f00000; | 677 | size_and_mask = 0x00f00000; |
605 | phys_addr = 36; | 678 | phys_addr = 36; |
606 | 679 | ||
@@ -619,7 +692,7 @@ void __init mtrr_bp_init(void) | |||
619 | boot_cpu_data.x86_mask == 0x4)) | 692 | boot_cpu_data.x86_mask == 0x4)) |
620 | phys_addr = 36; | 693 | phys_addr = 36; |
621 | 694 | ||
622 | size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1); | 695 | size_or_mask = SIZE_OR_MASK_BITS(phys_addr); |
623 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; | 696 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; |
624 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && | 697 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && |
625 | boot_cpu_data.x86 == 6) { | 698 | boot_cpu_data.x86 == 6) { |
@@ -627,7 +700,7 @@ void __init mtrr_bp_init(void) | |||
627 | * VIA C* family have Intel style MTRRs, | 700 | * VIA C* family have Intel style MTRRs, |
628 | * but don't support PAE | 701 | * but don't support PAE |
629 | */ | 702 | */ |
630 | size_or_mask = 0xfff00000; /* 32 bits */ | 703 | size_or_mask = SIZE_OR_MASK_BITS(32); |
631 | size_and_mask = 0; | 704 | size_and_mask = 0; |
632 | phys_addr = 32; | 705 | phys_addr = 32; |
633 | } | 706 | } |
@@ -637,21 +710,21 @@ void __init mtrr_bp_init(void) | |||
637 | if (cpu_has_k6_mtrr) { | 710 | if (cpu_has_k6_mtrr) { |
638 | /* Pre-Athlon (K6) AMD CPU MTRRs */ | 711 | /* Pre-Athlon (K6) AMD CPU MTRRs */ |
639 | mtrr_if = mtrr_ops[X86_VENDOR_AMD]; | 712 | mtrr_if = mtrr_ops[X86_VENDOR_AMD]; |
640 | size_or_mask = 0xfff00000; /* 32 bits */ | 713 | size_or_mask = SIZE_OR_MASK_BITS(32); |
641 | size_and_mask = 0; | 714 | size_and_mask = 0; |
642 | } | 715 | } |
643 | break; | 716 | break; |
644 | case X86_VENDOR_CENTAUR: | 717 | case X86_VENDOR_CENTAUR: |
645 | if (cpu_has_centaur_mcr) { | 718 | if (cpu_has_centaur_mcr) { |
646 | mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR]; | 719 | mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR]; |
647 | size_or_mask = 0xfff00000; /* 32 bits */ | 720 | size_or_mask = SIZE_OR_MASK_BITS(32); |
648 | size_and_mask = 0; | 721 | size_and_mask = 0; |
649 | } | 722 | } |
650 | break; | 723 | break; |
651 | case X86_VENDOR_CYRIX: | 724 | case X86_VENDOR_CYRIX: |
652 | if (cpu_has_cyrix_arr) { | 725 | if (cpu_has_cyrix_arr) { |
653 | mtrr_if = mtrr_ops[X86_VENDOR_CYRIX]; | 726 | mtrr_if = mtrr_ops[X86_VENDOR_CYRIX]; |
654 | size_or_mask = 0xfff00000; /* 32 bits */ | 727 | size_or_mask = SIZE_OR_MASK_BITS(32); |
655 | size_and_mask = 0; | 728 | size_and_mask = 0; |
656 | } | 729 | } |
657 | break; | 730 | break; |
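For driver authors, the intended calling pattern for the new arch_phys_wc_add()/arch_phys_wc_del() pair is to treat the return value as an opaque cookie. A hypothetical user, assuming the declarations are in scope (bar_base, bar_size, and the function names below are invented):

    /* Probe: ask for write-combining on a hypothetical MMIO BAR. The
     * cookie is 0 when PAT already provides WC, negative on failure, or
     * an offset-encoded MTRR index; all three are safe to pass back. */
    static int wc_cookie;

    static int my_probe(unsigned long bar_base, unsigned long bar_size)
    {
        wc_cookie = arch_phys_wc_add(bar_base, bar_size);
        return 0;    /* failure only costs performance, not correctness */
    }

    static void my_remove(void)
    {
        arch_phys_wc_del(wc_cookie);   /* no-op for cookies <= 0 */
    }

The MTRR_TO_PHYS_WC_OFFSET bias of 1000 is what lets arch_phys_wc_del() distinguish a real handle from an error code or the PAT fast path.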
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1025f3c99d20..a7c7305030cc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -403,7 +403,8 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
403 | * check that PEBS LBR correction does not conflict with | 403 | * check that PEBS LBR correction does not conflict with |
404 | * whatever the user is asking with attr->branch_sample_type | 404 | * whatever the user is asking with attr->branch_sample_type |
405 | */ | 405 | */ |
406 | if (event->attr.precise_ip > 1) { | 406 | if (event->attr.precise_ip > 1 && |
407 | x86_pmu.intel_cap.pebs_format < 2) { | ||
407 | u64 *br_type = &event->attr.branch_sample_type; | 408 | u64 *br_type = &event->attr.branch_sample_type; |
408 | 409 | ||
409 | if (has_branch_stack(event)) { | 410 | if (has_branch_stack(event)) { |
@@ -568,7 +569,7 @@ struct sched_state { | |||
568 | struct perf_sched { | 569 | struct perf_sched { |
569 | int max_weight; | 570 | int max_weight; |
570 | int max_events; | 571 | int max_events; |
571 | struct event_constraint **constraints; | 572 | struct perf_event **events; |
572 | struct sched_state state; | 573 | struct sched_state state; |
573 | int saved_states; | 574 | int saved_states; |
574 | struct sched_state saved[SCHED_STATES_MAX]; | 575 | struct sched_state saved[SCHED_STATES_MAX]; |
@@ -577,7 +578,7 @@ struct perf_sched { | |||
577 | /* | 578 | /* |
578 | * Initialize iterator that runs through all events and counters. | 579 | * Initialize iterator that runs through all events and counters. |
579 | */ | 580 | */ |
580 | static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c, | 581 | static void perf_sched_init(struct perf_sched *sched, struct perf_event **events, |
581 | int num, int wmin, int wmax) | 582 | int num, int wmin, int wmax) |
582 | { | 583 | { |
583 | int idx; | 584 | int idx; |
@@ -585,10 +586,10 @@ static void perf_sched_init(struct perf_sched *sched, struct event_constraint ** | |||
585 | memset(sched, 0, sizeof(*sched)); | 586 | memset(sched, 0, sizeof(*sched)); |
586 | sched->max_events = num; | 587 | sched->max_events = num; |
587 | sched->max_weight = wmax; | 588 | sched->max_weight = wmax; |
588 | sched->constraints = c; | 589 | sched->events = events; |
589 | 590 | ||
590 | for (idx = 0; idx < num; idx++) { | 591 | for (idx = 0; idx < num; idx++) { |
591 | if (c[idx]->weight == wmin) | 592 | if (events[idx]->hw.constraint->weight == wmin) |
592 | break; | 593 | break; |
593 | } | 594 | } |
594 | 595 | ||
@@ -635,8 +636,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) | |||
635 | if (sched->state.event >= sched->max_events) | 636 | if (sched->state.event >= sched->max_events) |
636 | return false; | 637 | return false; |
637 | 638 | ||
638 | c = sched->constraints[sched->state.event]; | 639 | c = sched->events[sched->state.event]->hw.constraint; |
639 | |||
640 | /* Prefer fixed purpose counters */ | 640 | /* Prefer fixed purpose counters */ |
641 | if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { | 641 | if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { |
642 | idx = INTEL_PMC_IDX_FIXED; | 642 | idx = INTEL_PMC_IDX_FIXED; |
@@ -694,7 +694,7 @@ static bool perf_sched_next_event(struct perf_sched *sched) | |||
694 | if (sched->state.weight > sched->max_weight) | 694 | if (sched->state.weight > sched->max_weight) |
695 | return false; | 695 | return false; |
696 | } | 696 | } |
697 | c = sched->constraints[sched->state.event]; | 697 | c = sched->events[sched->state.event]->hw.constraint; |
698 | } while (c->weight != sched->state.weight); | 698 | } while (c->weight != sched->state.weight); |
699 | 699 | ||
700 | sched->state.counter = 0; /* start with first counter */ | 700 | sched->state.counter = 0; /* start with first counter */ |
@@ -705,12 +705,12 @@ static bool perf_sched_next_event(struct perf_sched *sched) | |||
705 | /* | 705 | /* |
706 | * Assign a counter for each event. | 706 | * Assign a counter for each event. |
707 | */ | 707 | */ |
708 | int perf_assign_events(struct event_constraint **constraints, int n, | 708 | int perf_assign_events(struct perf_event **events, int n, |
709 | int wmin, int wmax, int *assign) | 709 | int wmin, int wmax, int *assign) |
710 | { | 710 | { |
711 | struct perf_sched sched; | 711 | struct perf_sched sched; |
712 | 712 | ||
713 | perf_sched_init(&sched, constraints, n, wmin, wmax); | 713 | perf_sched_init(&sched, events, n, wmin, wmax); |
714 | 714 | ||
715 | do { | 715 | do { |
716 | if (!perf_sched_find_counter(&sched)) | 716 | if (!perf_sched_find_counter(&sched)) |
@@ -724,16 +724,19 @@ int perf_assign_events(struct event_constraint **constraints, int n, | |||
724 | 724 | ||
725 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | 725 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) |
726 | { | 726 | { |
727 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; | 727 | struct event_constraint *c; |
728 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 728 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
729 | struct perf_event *e; | ||
729 | int i, wmin, wmax, num = 0; | 730 | int i, wmin, wmax, num = 0; |
730 | struct hw_perf_event *hwc; | 731 | struct hw_perf_event *hwc; |
731 | 732 | ||
732 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); | 733 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); |
733 | 734 | ||
734 | for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { | 735 | for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { |
736 | hwc = &cpuc->event_list[i]->hw; | ||
735 | c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); | 737 | c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); |
736 | constraints[i] = c; | 738 | hwc->constraint = c; |
739 | |||
737 | wmin = min(wmin, c->weight); | 740 | wmin = min(wmin, c->weight); |
738 | wmax = max(wmax, c->weight); | 741 | wmax = max(wmax, c->weight); |
739 | } | 742 | } |
@@ -743,7 +746,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
743 | */ | 746 | */ |
744 | for (i = 0; i < n; i++) { | 747 | for (i = 0; i < n; i++) { |
745 | hwc = &cpuc->event_list[i]->hw; | 748 | hwc = &cpuc->event_list[i]->hw; |
746 | c = constraints[i]; | 749 | c = hwc->constraint; |
747 | 750 | ||
748 | /* never assigned */ | 751 | /* never assigned */ |
749 | if (hwc->idx == -1) | 752 | if (hwc->idx == -1) |
@@ -764,16 +767,35 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
764 | 767 | ||
765 | /* slow path */ | 768 | /* slow path */ |
766 | if (i != n) | 769 | if (i != n) |
767 | num = perf_assign_events(constraints, n, wmin, wmax, assign); | 770 | num = perf_assign_events(cpuc->event_list, n, wmin, |
771 | wmax, assign); | ||
768 | 772 | ||
769 | /* | 773 | /* |
774 | * Mark the event as committed, so we do not put_constraint() | ||
775 | * in case new events are added and fail scheduling. | ||
776 | */ | ||
777 | if (!num && assign) { | ||
778 | for (i = 0; i < n; i++) { | ||
779 | e = cpuc->event_list[i]; | ||
780 | e->hw.flags |= PERF_X86_EVENT_COMMITTED; | ||
781 | } | ||
782 | } | ||
783 | /* | ||
770 | * scheduling failed or is just a simulation, | 784 | * scheduling failed or is just a simulation, |
771 | * free resources if necessary | 785 | * free resources if necessary |
772 | */ | 786 | */ |
773 | if (!assign || num) { | 787 | if (!assign || num) { |
774 | for (i = 0; i < n; i++) { | 788 | for (i = 0; i < n; i++) { |
789 | e = cpuc->event_list[i]; | ||
790 | /* | ||
791 | * do not put_constraint() on committed events, | ||
792 | * because they are good to go | ||
793 | */ | ||
794 | if ((e->hw.flags & PERF_X86_EVENT_COMMITTED)) | ||
795 | continue; | ||
796 | |||
775 | if (x86_pmu.put_event_constraints) | 797 | if (x86_pmu.put_event_constraints) |
776 | x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); | 798 | x86_pmu.put_event_constraints(cpuc, e); |
777 | } | 799 | } |
778 | } | 800 | } |
779 | return num ? -EINVAL : 0; | 801 | return num ? -EINVAL : 0; |
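The PERF_X86_EVENT_COMMITTED flag added above makes the error path selective: when a later group fails to schedule, constraints are released only for events that never passed a successful pass, so resources held by an earlier committed transaction stay untouched. The two-phase shape, reduced to a sketch (all names invented):

    #include <stdbool.h>
    #include <stddef.h>

    #define FLAG_COMMITTED 0x1

    struct event { unsigned int flags; };

    static void put_constraint(struct event *e)
    {
        (void)e;   /* release the event's counter constraint */
    }

    /* On success, mark every event committed; on failure, release only
     * the events that were never committed by an earlier pass. */
    static void finish_schedule(struct event **ev, size_t n, bool ok)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            if (ok)
                ev[i]->flags |= FLAG_COMMITTED;
            else if (!(ev[i]->flags & FLAG_COMMITTED))
                put_constraint(ev[i]);
        }
    }

x86_pmu_del() clears the flag again once the event is descheduled, closing the cycle.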
@@ -1153,6 +1175,11 @@ static void x86_pmu_del(struct perf_event *event, int flags) | |||
1153 | int i; | 1175 | int i; |
1154 | 1176 | ||
1155 | /* | 1177 | /* |
1178 | * event is descheduled | ||
1179 | */ | ||
1180 | event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; | ||
1181 | |||
1182 | /* | ||
1156 | * If we're called during a txn, we don't need to do anything. | 1183 | * If we're called during a txn, we don't need to do anything. |
1157 | * The events never got scheduled and ->cancel_txn will truncate | 1184 | * The events never got scheduled and ->cancel_txn will truncate |
1158 | * the event_list. | 1185 | * the event_list. |
@@ -1249,16 +1276,26 @@ void perf_events_lapic_init(void) | |||
1249 | static int __kprobes | 1276 | static int __kprobes |
1250 | perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) | 1277 | perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) |
1251 | { | 1278 | { |
1279 | int ret; | ||
1280 | u64 start_clock; | ||
1281 | u64 finish_clock; | ||
1282 | |||
1252 | if (!atomic_read(&active_events)) | 1283 | if (!atomic_read(&active_events)) |
1253 | return NMI_DONE; | 1284 | return NMI_DONE; |
1254 | 1285 | ||
1255 | return x86_pmu.handle_irq(regs); | 1286 | start_clock = local_clock(); |
1287 | ret = x86_pmu.handle_irq(regs); | ||
1288 | finish_clock = local_clock(); | ||
1289 | |||
1290 | perf_sample_event_took(finish_clock - start_clock); | ||
1291 | |||
1292 | return ret; | ||
1256 | } | 1293 | } |
1257 | 1294 | ||
1258 | struct event_constraint emptyconstraint; | 1295 | struct event_constraint emptyconstraint; |
1259 | struct event_constraint unconstrained; | 1296 | struct event_constraint unconstrained; |
1260 | 1297 | ||
1261 | static int __cpuinit | 1298 | static int |
1262 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 1299 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1263 | { | 1300 | { |
1264 | unsigned int cpu = (long)hcpu; | 1301 | unsigned int cpu = (long)hcpu; |
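The perf_event_nmi_handler() change above samples local_clock() around the PMI handler and reports the delta to perf_sample_event_took(), which the perf core uses to back off the sampling rate when handlers run long. The measurement idiom in portable C (clock_gettime() standing in for local_clock(); stub names invented):

    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static int handle_irq(void) { return 0; }            /* placeholder   */
    static void sample_took(uint64_t ns) { (void)ns; }   /* rate feedback */

    static int timed_handler(void)
    {
        uint64_t start = now_ns();
        int ret = handle_irq();

        sample_took(now_ns() - start);   /* duration drives throttling */
        return ret;
    }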
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index ba9aadfa683b..97e557bc4c91 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -63,10 +63,12 @@ struct event_constraint { | |||
63 | int flags; | 63 | int flags; |
64 | }; | 64 | }; |
65 | /* | 65 | /* |
66 | * struct event_constraint flags | 66 | * struct hw_perf_event.flags flags |
67 | */ | 67 | */ |
68 | #define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ | 68 | #define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */ |
69 | #define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ | 69 | #define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */ |
70 | #define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style st data sampling */ | ||
71 | #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ | ||
70 | 72 | ||
71 | struct amd_nb { | 73 | struct amd_nb { |
72 | int nb_id; /* NorthBridge id */ | 74 | int nb_id; /* NorthBridge id */ |
@@ -227,11 +229,14 @@ struct cpu_hw_events { | |||
227 | * - inv | 229 | * - inv |
228 | * - edge | 230 | * - edge |
229 | * - cnt-mask | 231 | * - cnt-mask |
232 | * - in_tx | ||
233 | * - in_tx_checkpointed | ||
230 | * The other filters are supported by fixed counters. | 234 | * The other filters are supported by fixed counters. |
231 | * The any-thread option is supported starting with v3. | 235 | * The any-thread option is supported starting with v3. |
232 | */ | 236 | */ |
237 | #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED) | ||
233 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | 238 | #define FIXED_EVENT_CONSTRAINT(c, n) \ |
234 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK) | 239 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) |
235 | 240 | ||
236 | /* | 241 | /* |
237 | * Constraint on the Event code + UMask | 242 | * Constraint on the Event code + UMask |
@@ -247,6 +252,11 @@ struct cpu_hw_events { | |||
247 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 252 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ |
248 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) | 253 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) |
249 | 254 | ||
255 | /* DataLA version of store sampling without extra enable bit. */ | ||
256 | #define INTEL_PST_HSW_CONSTRAINT(c, n) \ | ||
257 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | ||
258 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | ||
259 | |||
250 | #define EVENT_CONSTRAINT_END \ | 260 | #define EVENT_CONSTRAINT_END \ |
251 | EVENT_CONSTRAINT(0, 0, 0) | 261 | EVENT_CONSTRAINT(0, 0, 0) |
252 | 262 | ||
@@ -301,6 +311,11 @@ union perf_capabilities { | |||
301 | u64 pebs_arch_reg:1; | 311 | u64 pebs_arch_reg:1; |
302 | u64 pebs_format:4; | 312 | u64 pebs_format:4; |
303 | u64 smm_freeze:1; | 313 | u64 smm_freeze:1; |
314 | /* | ||
315 | * PMU supports separate counter range for writing | ||
316 | * values > 32bit. | ||
317 | */ | ||
318 | u64 full_width_write:1; | ||
304 | }; | 319 | }; |
305 | u64 capabilities; | 320 | u64 capabilities; |
306 | }; | 321 | }; |
@@ -375,6 +390,7 @@ struct x86_pmu { | |||
375 | struct event_constraint *event_constraints; | 390 | struct event_constraint *event_constraints; |
376 | struct x86_pmu_quirk *quirks; | 391 | struct x86_pmu_quirk *quirks; |
377 | int perfctr_second_write; | 392 | int perfctr_second_write; |
393 | bool late_ack; | ||
378 | 394 | ||
379 | /* | 395 | /* |
380 | * sysfs attrs | 396 | * sysfs attrs |
@@ -528,7 +544,7 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | |||
528 | 544 | ||
529 | void x86_pmu_enable_all(int added); | 545 | void x86_pmu_enable_all(int added); |
530 | 546 | ||
531 | int perf_assign_events(struct event_constraint **constraints, int n, | 547 | int perf_assign_events(struct perf_event **events, int n, |
532 | int wmin, int wmax, int *assign); | 548 | int wmin, int wmax, int *assign); |
533 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); | 549 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); |
534 | 550 | ||
@@ -633,6 +649,8 @@ extern struct event_constraint intel_snb_pebs_event_constraints[]; | |||
633 | 649 | ||
634 | extern struct event_constraint intel_ivb_pebs_event_constraints[]; | 650 | extern struct event_constraint intel_ivb_pebs_event_constraints[]; |
635 | 651 | ||
652 | extern struct event_constraint intel_hsw_pebs_event_constraints[]; | ||
653 | |||
636 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); | 654 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); |
637 | 655 | ||
638 | void intel_pmu_pebs_enable(struct perf_event *event); | 656 | void intel_pmu_pebs_enable(struct perf_event *event); |
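The FIXED_EVENT_FLAGS change in perf_event.h widens the filter mask used by fixed-counter constraints so that Haswell's in_tx and in_tx_checkpointed config bits participate in matching. A toy illustration of why the mask width matters (HSW_IN_TX and HSW_IN_TX_CHECKPOINTED are assumed here to sit at bits 32 and 33; the other values are invented):

    #include <stdbool.h>
    #include <stdint.h>

    #define RAW_EVENT_MASK 0xffffull        /* stand-in, narrow mask */
    #define IN_TX          (1ull << 32)
    #define IN_TX_CP       (1ull << 33)
    #define FIXED_FLAGS    (RAW_EVENT_MASK | IN_TX | IN_TX_CP)

    /* A config matches a constraint if, restricted to the constraint's
     * mask, it equals the constraint's event code. */
    static bool matches(uint64_t config, uint64_t code, uint64_t mask)
    {
        return (config & mask) == code;
    }

    int main(void)
    {
        uint64_t code = 0x00c0;            /* a plain fixed-counter code */

        /* Narrow mask: a transactional config wrongly matches. */
        bool wrong = matches(code | IN_TX, code, RAW_EVENT_MASK);  /* true  */
        /* Widened mask: the IN_TX bit is seen and the match fails. */
        bool right = matches(code | IN_TX, code, FIXED_FLAGS);     /* false */

        return (wrong && !right) ? 0 : 1;
    }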
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 7e28d9467bb4..4cbe03287b08 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -648,48 +648,48 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
648 | .cpu_dead = amd_pmu_cpu_dead, | 648 | .cpu_dead = amd_pmu_cpu_dead, |
649 | }; | 649 | }; |
650 | 650 | ||
651 | static int setup_event_constraints(void) | 651 | static int __init amd_core_pmu_init(void) |
652 | { | 652 | { |
653 | if (boot_cpu_data.x86 == 0x15) | 653 | if (!cpu_has_perfctr_core) |
654 | return 0; | ||
655 | |||
656 | switch (boot_cpu_data.x86) { | ||
657 | case 0x15: | ||
658 | pr_cont("Fam15h "); | ||
654 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; | 659 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
655 | return 0; | 660 | break; |
656 | } | ||
657 | 661 | ||
658 | static int setup_perfctr_core(void) | 662 | default: |
659 | { | 663 | pr_err("core perfctr but no constraints; unknown hardware!\n"); |
660 | if (!cpu_has_perfctr_core) { | ||
661 | WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h, | ||
662 | KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!"); | ||
663 | return -ENODEV; | 664 | return -ENODEV; |
664 | } | 665 | } |
665 | 666 | ||
666 | WARN(x86_pmu.get_event_constraints == amd_get_event_constraints, | ||
667 | KERN_ERR "hw perf events core counters need constraints handler!"); | ||
668 | |||
669 | /* | 667 | /* |
670 | * If core performance counter extensions exist, we must use | 668 | * If core performance counter extensions exist, we must use |
671 | * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also | 669 | * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also |
672 | * x86_pmu_addr_offset(). | 670 | * amd_pmu_addr_offset(). |
673 | */ | 671 | */ |
674 | x86_pmu.eventsel = MSR_F15H_PERF_CTL; | 672 | x86_pmu.eventsel = MSR_F15H_PERF_CTL; |
675 | x86_pmu.perfctr = MSR_F15H_PERF_CTR; | 673 | x86_pmu.perfctr = MSR_F15H_PERF_CTR; |
676 | x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; | 674 | x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; |
677 | 675 | ||
678 | printk(KERN_INFO "perf: AMD core performance counters detected\n"); | 676 | pr_cont("core perfctr, "); |
679 | |||
680 | return 0; | 677 | return 0; |
681 | } | 678 | } |
682 | 679 | ||
683 | __init int amd_pmu_init(void) | 680 | __init int amd_pmu_init(void) |
684 | { | 681 | { |
682 | int ret; | ||
683 | |||
685 | /* Performance-monitoring supported from K7 and later: */ | 684 | /* Performance-monitoring supported from K7 and later: */ |
686 | if (boot_cpu_data.x86 < 6) | 685 | if (boot_cpu_data.x86 < 6) |
687 | return -ENODEV; | 686 | return -ENODEV; |
688 | 687 | ||
689 | x86_pmu = amd_pmu; | 688 | x86_pmu = amd_pmu; |
690 | 689 | ||
691 | setup_event_constraints(); | 690 | ret = amd_core_pmu_init(); |
692 | setup_perfctr_core(); | 691 | if (ret) |
692 | return ret; | ||
693 | 693 | ||
694 | /* Events are common for all AMDs */ | 694 | /* Events are common for all AMDs */ |
695 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 695 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 5f0581e713c2..e09f0bfb7b8f 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy) | |||
851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | 851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
852 | } | 852 | } |
853 | 853 | ||
854 | static int __cpuinit | 854 | static int |
855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
856 | { | 856 | { |
857 | switch (action & ~CPU_TASKS_FROZEN) { | 857 | switch (action & ~CPU_TASKS_FROZEN) { |
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c new file mode 100644 index 000000000000..639d1289b1ba --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Steven Kinney <Steven.Kinney@amd.com> | ||
5 | * Author: Suravee Suthikulpanit <Suraveee.Suthikulpanit@amd.com> | ||
6 | * | ||
7 | * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/perf_event.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/cpumask.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #include "perf_event.h" | ||
20 | #include "perf_event_amd_iommu.h" | ||
21 | |||
22 | #define COUNTER_SHIFT 16 | ||
23 | |||
24 | #define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8)) | ||
25 | #define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg)) | ||
26 | |||
27 | /* iommu pmu config masks */ | ||
28 | #define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL)) | ||
29 | #define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL) | ||
30 | #define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL) | ||
31 | #define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL) | ||
32 | #define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL) | ||
33 | #define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL) | ||
34 | #define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL) | ||
35 | |||
36 | static struct perf_amd_iommu __perf_iommu; | ||
37 | |||
38 | struct perf_amd_iommu { | ||
39 | struct pmu pmu; | ||
40 | u8 max_banks; | ||
41 | u8 max_counters; | ||
42 | u64 cntr_assign_mask; | ||
43 | raw_spinlock_t lock; | ||
44 | const struct attribute_group *attr_groups[4]; | ||
45 | }; | ||
46 | |||
47 | #define format_group attr_groups[0] | ||
48 | #define cpumask_group attr_groups[1] | ||
49 | #define events_group attr_groups[2] | ||
50 | #define null_group attr_groups[3] | ||
51 | |||
52 | /*--------------------------------------------- | ||
53 | * sysfs format attributes | ||
54 | *---------------------------------------------*/ | ||
55 | PMU_FORMAT_ATTR(csource, "config:0-7"); | ||
56 | PMU_FORMAT_ATTR(devid, "config:8-23"); | ||
57 | PMU_FORMAT_ATTR(pasid, "config:24-39"); | ||
58 | PMU_FORMAT_ATTR(domid, "config:40-55"); | ||
59 | PMU_FORMAT_ATTR(devid_mask, "config1:0-15"); | ||
60 | PMU_FORMAT_ATTR(pasid_mask, "config1:16-31"); | ||
61 | PMU_FORMAT_ATTR(domid_mask, "config1:32-47"); | ||
62 | |||
63 | static struct attribute *iommu_format_attrs[] = { | ||
64 | &format_attr_csource.attr, | ||
65 | &format_attr_devid.attr, | ||
66 | &format_attr_pasid.attr, | ||
67 | &format_attr_domid.attr, | ||
68 | &format_attr_devid_mask.attr, | ||
69 | &format_attr_pasid_mask.attr, | ||
70 | &format_attr_domid_mask.attr, | ||
71 | NULL, | ||
72 | }; | ||
73 | |||
74 | static struct attribute_group amd_iommu_format_group = { | ||
75 | .name = "format", | ||
76 | .attrs = iommu_format_attrs, | ||
77 | }; | ||
78 | |||
79 | /*--------------------------------------------- | ||
80 | * sysfs events attributes | ||
81 | *---------------------------------------------*/ | ||
82 | struct amd_iommu_event_desc { | ||
83 | struct kobj_attribute attr; | ||
84 | const char *event; | ||
85 | }; | ||
86 | |||
87 | static ssize_t _iommu_event_show(struct kobject *kobj, | ||
88 | struct kobj_attribute *attr, char *buf) | ||
89 | { | ||
90 | struct amd_iommu_event_desc *event = | ||
91 | container_of(attr, struct amd_iommu_event_desc, attr); | ||
92 | return sprintf(buf, "%s\n", event->event); | ||
93 | } | ||
94 | |||
95 | #define AMD_IOMMU_EVENT_DESC(_name, _event) \ | ||
96 | { \ | ||
97 | .attr = __ATTR(_name, 0444, _iommu_event_show, NULL), \ | ||
98 | .event = _event, \ | ||
99 | } | ||
100 | |||
101 | static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = { | ||
102 | AMD_IOMMU_EVENT_DESC(mem_pass_untrans, "csource=0x01"), | ||
103 | AMD_IOMMU_EVENT_DESC(mem_pass_pretrans, "csource=0x02"), | ||
104 | AMD_IOMMU_EVENT_DESC(mem_pass_excl, "csource=0x03"), | ||
105 | AMD_IOMMU_EVENT_DESC(mem_target_abort, "csource=0x04"), | ||
106 | AMD_IOMMU_EVENT_DESC(mem_trans_total, "csource=0x05"), | ||
107 | AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit, "csource=0x06"), | ||
108 | AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis, "csource=0x07"), | ||
109 | AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit, "csource=0x08"), | ||
110 | AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis, "csource=0x09"), | ||
111 | AMD_IOMMU_EVENT_DESC(mem_dte_hit, "csource=0x0a"), | ||
112 | AMD_IOMMU_EVENT_DESC(mem_dte_mis, "csource=0x0b"), | ||
113 | AMD_IOMMU_EVENT_DESC(page_tbl_read_tot, "csource=0x0c"), | ||
114 | AMD_IOMMU_EVENT_DESC(page_tbl_read_nst, "csource=0x0d"), | ||
115 | AMD_IOMMU_EVENT_DESC(page_tbl_read_gst, "csource=0x0e"), | ||
116 | AMD_IOMMU_EVENT_DESC(int_dte_hit, "csource=0x0f"), | ||
117 | AMD_IOMMU_EVENT_DESC(int_dte_mis, "csource=0x10"), | ||
118 | AMD_IOMMU_EVENT_DESC(cmd_processed, "csource=0x11"), | ||
119 | AMD_IOMMU_EVENT_DESC(cmd_processed_inv, "csource=0x12"), | ||
120 | AMD_IOMMU_EVENT_DESC(tlb_inv, "csource=0x13"), | ||
121 | { /* end: all zeroes */ }, | ||
122 | }; | ||
123 | |||
124 | /*--------------------------------------------- | ||
125 | * sysfs cpumask attributes | ||
126 | *---------------------------------------------*/ | ||
127 | static cpumask_t iommu_cpumask; | ||
128 | |||
129 | static ssize_t _iommu_cpumask_show(struct device *dev, | ||
130 | struct device_attribute *attr, | ||
131 | char *buf) | ||
132 | { | ||
133 | int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &iommu_cpumask); | ||
134 | buf[n++] = '\n'; | ||
135 | buf[n] = '\0'; | ||
136 | return n; | ||
137 | } | ||
138 | static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL); | ||
139 | |||
140 | static struct attribute *iommu_cpumask_attrs[] = { | ||
141 | &dev_attr_cpumask.attr, | ||
142 | NULL, | ||
143 | }; | ||
144 | |||
145 | static struct attribute_group amd_iommu_cpumask_group = { | ||
146 | .attrs = iommu_cpumask_attrs, | ||
147 | }; | ||
148 | |||
149 | /*---------------------------------------------*/ | ||
150 | |||
151 | static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu) | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | int shift, bank, cntr, retval; | ||
155 | int max_banks = perf_iommu->max_banks; | ||
156 | int max_cntrs = perf_iommu->max_counters; | ||
157 | |||
158 | raw_spin_lock_irqsave(&perf_iommu->lock, flags); | ||
159 | |||
160 | for (bank = 0, shift = 0; bank < max_banks; bank++) { | ||
161 | for (cntr = 0; cntr < max_cntrs; cntr++) { | ||
162 | shift = bank + (bank*3) + cntr; | ||
163 | if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) { | ||
164 | continue; | ||
165 | } else { | ||
166 | perf_iommu->cntr_assign_mask |= (1ULL<<shift); | ||
167 | retval = ((u16)((u16)bank<<8) | (u8)(cntr)); | ||
168 | goto out; | ||
169 | } | ||
170 | } | ||
171 | } | ||
172 | retval = -ENOSPC; | ||
173 | out: | ||
174 | raw_spin_unlock_irqrestore(&perf_iommu->lock, flags); | ||
175 | return retval; | ||
176 | } | ||
177 | |||
178 | static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu, | ||
179 | u8 bank, u8 cntr) | ||
180 | { | ||
181 | unsigned long flags; | ||
182 | int max_banks, max_cntrs; | ||
183 | int shift = 0; | ||
184 | |||
185 | max_banks = perf_iommu->max_banks; | ||
186 | max_cntrs = perf_iommu->max_counters; | ||
187 | |||
188 | if ((bank > max_banks) || (cntr > max_cntrs)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | shift = bank + cntr + (bank*3); | ||
192 | |||
193 | raw_spin_lock_irqsave(&perf_iommu->lock, flags); | ||
194 | perf_iommu->cntr_assign_mask &= ~(1ULL<<shift); | ||
195 | raw_spin_unlock_irqrestore(&perf_iommu->lock, flags); | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static int perf_iommu_event_init(struct perf_event *event) | ||
201 | { | ||
202 | struct hw_perf_event *hwc = &event->hw; | ||
203 | struct perf_amd_iommu *perf_iommu; | ||
204 | u64 config, config1; | ||
205 | |||
206 | /* check that the event attr type matches this PMU's type */ | ||
207 | if (event->attr.type != event->pmu->type) | ||
208 | return -ENOENT; | ||
209 | |||
210 | /* | ||
211 | * IOMMU counters are shared across all cores. | ||
212 | * Therefore, they do not support per-process mode. | ||
213 | * Also, they do not support event sampling mode. | ||
214 | */ | ||
215 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) | ||
216 | return -EINVAL; | ||
217 | |||
218 | /* IOMMU counters do not have usr/os/guest/host bits */ | ||
219 | if (event->attr.exclude_user || event->attr.exclude_kernel || | ||
220 | event->attr.exclude_host || event->attr.exclude_guest) | ||
221 | return -EINVAL; | ||
222 | |||
223 | if (event->cpu < 0) | ||
224 | return -EINVAL; | ||
225 | |||
226 | perf_iommu = &__perf_iommu; | ||
227 | |||
228 | if (event->pmu != &perf_iommu->pmu) | ||
229 | return -ENOENT; | ||
230 | |||
231 | if (perf_iommu) { | ||
232 | config = event->attr.config; | ||
233 | config1 = event->attr.config1; | ||
234 | } else { | ||
235 | return -EINVAL; | ||
236 | } | ||
237 | |||
238 | /* use the iommu base devid (0000); assumes a single iommu */ | ||
239 | perf_iommu->max_banks = | ||
240 | amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID); | ||
241 | perf_iommu->max_counters = | ||
242 | amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID); | ||
243 | if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0)) | ||
244 | return -EINVAL; | ||
245 | |||
246 | /* update the hw_perf_event struct with the iommu config data */ | ||
247 | hwc->config = config; | ||
248 | hwc->extra_reg.config = config1; | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void perf_iommu_enable_event(struct perf_event *ev) | ||
254 | { | ||
255 | u8 csource = _GET_CSOURCE(ev); | ||
256 | u16 devid = _GET_DEVID(ev); | ||
257 | u64 reg = 0ULL; | ||
258 | |||
259 | reg = csource; | ||
260 | amd_iommu_pc_get_set_reg_val(devid, | ||
261 | _GET_BANK(ev), _GET_CNTR(ev), | ||
262 | IOMMU_PC_COUNTER_SRC_REG, ®, true); | ||
263 | |||
264 | reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32); | ||
265 | if (reg) | ||
266 | reg |= (1UL << 31); | ||
267 | amd_iommu_pc_get_set_reg_val(devid, | ||
268 | _GET_BANK(ev), _GET_CNTR(ev), | ||
269 | IOMMU_PC_DEVID_MATCH_REG, ®, true); | ||
270 | |||
271 | reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); | ||
272 | if (reg) | ||
273 | reg |= (1UL << 31); | ||
274 | amd_iommu_pc_get_set_reg_val(devid, | ||
275 | _GET_BANK(ev), _GET_CNTR(ev), | ||
276 | IOMMU_PC_PASID_MATCH_REG, ®, true); | ||
277 | |||
278 | reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); | ||
279 | if (reg) | ||
280 | reg |= (1UL << 31); | ||
281 | amd_iommu_pc_get_set_reg_val(devid, | ||
282 | _GET_BANK(ev), _GET_CNTR(ev), | ||
283 | IOMMU_PC_DOMID_MATCH_REG, ®, true); | ||
284 | } | ||
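All three match registers written above share one layout: the match value sits in the low half, its mask in bits 63:32, and bit 31 is raised as an enable flag whenever any field is non-zero. A minimal helper capturing that encoding (a sketch; the kernel open-codes it per register):

#include <stdint.h>

/*
 * Encode an iommu pc match register: value (devid/pasid/domid) in the
 * low bits, mask in bits 63:32, bit 31 enabling the match when set.
 */
uint64_t encode_match(uint32_t value, uint32_t mask)
{
        uint64_t reg = (uint64_t)value | ((uint64_t)mask << 32);

        if (reg)
                reg |= 1ULL << 31; /* enable matching */
        return reg;
}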
285 | |||
286 | static void perf_iommu_disable_event(struct perf_event *event) | ||
287 | { | ||
288 | u64 reg = 0ULL; | ||
289 | |||
290 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | ||
291 | _GET_BANK(event), _GET_CNTR(event), | ||
292 | IOMMU_PC_COUNTER_SRC_REG, ®, true); | ||
293 | } | ||
294 | |||
295 | static void perf_iommu_start(struct perf_event *event, int flags) | ||
296 | { | ||
297 | struct hw_perf_event *hwc = &event->hw; | ||
298 | |||
299 | pr_debug("perf: amd_iommu:perf_iommu_start\n"); | ||
300 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) | ||
301 | return; | ||
302 | |||
303 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
304 | hwc->state = 0; | ||
305 | |||
306 | if (flags & PERF_EF_RELOAD) { | ||
307 | u64 prev_raw_count = local64_read(&hwc->prev_count); | ||
308 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | ||
309 | _GET_BANK(event), _GET_CNTR(event), | ||
310 | IOMMU_PC_COUNTER_REG, &prev_raw_count, true); | ||
311 | } | ||
312 | |||
313 | perf_iommu_enable_event(event); | ||
314 | perf_event_update_userpage(event); | ||
315 | |||
316 | } | ||
317 | |||
318 | static void perf_iommu_read(struct perf_event *event) | ||
319 | { | ||
320 | u64 count = 0ULL; | ||
321 | u64 prev_raw_count = 0ULL; | ||
322 | u64 delta = 0ULL; | ||
323 | struct hw_perf_event *hwc = &event->hw; | ||
324 | pr_debug("perf: amd_iommu:perf_iommu_read\n"); | ||
325 | |||
326 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | ||
327 | _GET_BANK(event), _GET_CNTR(event), | ||
328 | IOMMU_PC_COUNTER_REG, &count, false); | ||
329 | |||
330 | /* IOMMU pc counter register is only 48 bits */ | ||
331 | count &= 0xFFFFFFFFFFFFULL; | ||
332 | |||
333 | prev_raw_count = local64_read(&hwc->prev_count); | ||
334 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
335 | count) != prev_raw_count) | ||
336 | return; | ||
337 | |||
338 | /* Handle 48-bit counter overflow */ | ||
339 | delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT); | ||
340 | delta >>= COUNTER_SHIFT; | ||
341 | local64_add(delta, &event->count); | ||
342 | |||
343 | } | ||
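The read path masks the raw value to 48 bits and then computes the wrap-safe delta by shifting both samples up by COUNTER_SHIFT before subtracting, so the subtraction wraps modulo 2^48 rather than 2^64. COUNTER_SHIFT is defined outside this hunk; 16 (64 - 48) is assumed in the worked example below:

#include <stdint.h>
#include <stdio.h>

#define COUNTER_SHIFT 16 /* assumed: 64 minus the 48-bit counter width */

static uint64_t delta48(uint64_t prev, uint64_t now)
{
        /* shift both to the top so the subtraction wraps mod 2^48 */
        uint64_t delta = (now << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);

        return delta >> COUNTER_SHIFT;
}

int main(void)
{
        /* counter wrapped: prev near the 48-bit limit, now small */
        printf("%llu\n",
               (unsigned long long)delta48(0xFFFFFFFFFFFEULL, 0x1ULL));
        /* prints 3: two ticks to wrap, one after */
        return 0;
}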
344 | |||
345 | static void perf_iommu_stop(struct perf_event *event, int flags) | ||
346 | { | ||
347 | struct hw_perf_event *hwc = &event->hw; | ||
348 | u64 config; | ||
349 | |||
350 | pr_debug("perf: amd_iommu:perf_iommu_stop\n"); | ||
351 | |||
352 | if (hwc->state & PERF_HES_UPTODATE) | ||
353 | return; | ||
354 | |||
355 | perf_iommu_disable_event(event); | ||
356 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | ||
357 | hwc->state |= PERF_HES_STOPPED; | ||
358 | |||
359 | if (hwc->state & PERF_HES_UPTODATE) | ||
360 | return; | ||
361 | |||
362 | config = hwc->config; | ||
363 | perf_iommu_read(event); | ||
364 | hwc->state |= PERF_HES_UPTODATE; | ||
365 | } | ||
366 | |||
367 | static int perf_iommu_add(struct perf_event *event, int flags) | ||
368 | { | ||
369 | int retval; | ||
370 | struct perf_amd_iommu *perf_iommu = | ||
371 | container_of(event->pmu, struct perf_amd_iommu, pmu); | ||
372 | |||
373 | pr_debug("perf: amd_iommu:perf_iommu_add\n"); | ||
374 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
375 | |||
376 | /* request an iommu bank/counter */ | ||
377 | retval = get_next_avail_iommu_bnk_cntr(perf_iommu); | ||
378 | if (retval != -ENOSPC) | ||
379 | event->hw.extra_reg.reg = (u16)retval; | ||
380 | else | ||
381 | return retval; | ||
382 | |||
383 | if (flags & PERF_EF_START) | ||
384 | perf_iommu_start(event, PERF_EF_RELOAD); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static void perf_iommu_del(struct perf_event *event, int flags) | ||
390 | { | ||
391 | struct perf_amd_iommu *perf_iommu = | ||
392 | container_of(event->pmu, struct perf_amd_iommu, pmu); | ||
393 | |||
394 | pr_debug("perf: amd_iommu:perf_iommu_del\n"); | ||
395 | perf_iommu_stop(event, PERF_EF_UPDATE); | ||
396 | |||
397 | /* clear the assigned iommu bank/counter */ | ||
398 | clear_avail_iommu_bnk_cntr(perf_iommu, | ||
399 | _GET_BANK(event), | ||
400 | _GET_CNTR(event)); | ||
401 | |||
402 | perf_event_update_userpage(event); | ||
403 | } | ||
404 | |||
405 | static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu) | ||
406 | { | ||
407 | struct attribute **attrs; | ||
408 | struct attribute_group *attr_group; | ||
409 | int i = 0, j; | ||
410 | |||
411 | while (amd_iommu_v2_event_descs[i].attr.attr.name) | ||
412 | i++; | ||
413 | |||
414 | attr_group = kzalloc(sizeof(struct attribute *) | ||
415 | * (i + 1) + sizeof(*attr_group), GFP_KERNEL); | ||
416 | if (!attr_group) | ||
417 | return -ENOMEM; | ||
418 | |||
419 | attrs = (struct attribute **)(attr_group + 1); | ||
420 | for (j = 0; j < i; j++) | ||
421 | attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr; | ||
422 | |||
423 | attr_group->name = "events"; | ||
424 | attr_group->attrs = attrs; | ||
425 | perf_iommu->events_group = attr_group; | ||
426 | |||
427 | return 0; | ||
428 | } | ||
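_init_events_attrs sizes a single kzalloc to hold both the attribute_group and its NULL-terminated attribute pointer array, then points attrs just past the group, so the one kfree in amd_iommu_pc_exit below releases everything. The same pattern in isolation, with calloc standing in for kzalloc:

#include <stdlib.h>

struct attribute { const char *name; };
struct attribute_group {
        const char *name;
        struct attribute **attrs;
};

/* One allocation covering the group plus an (n + 1)-slot attrs array. */
struct attribute_group *alloc_group(struct attribute *descs, int n)
{
        struct attribute_group *g;
        struct attribute **attrs;
        int j;

        g = calloc(1, sizeof(*g) + sizeof(struct attribute *) * (n + 1));
        if (!g)
                return NULL;

        attrs = (struct attribute **)(g + 1); /* array follows the group */
        for (j = 0; j < n; j++)
                attrs[j] = &descs[j];         /* last slot stays NULL */

        g->name = "events";
        g->attrs = attrs;
        return g;
}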
429 | |||
430 | static __init void amd_iommu_pc_exit(void) | ||
431 | { | ||
432 | if (__perf_iommu.events_group != NULL) { | ||
433 | kfree(__perf_iommu.events_group); | ||
434 | __perf_iommu.events_group = NULL; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | static __init int _init_perf_amd_iommu( | ||
439 | struct perf_amd_iommu *perf_iommu, char *name) | ||
440 | { | ||
441 | int ret; | ||
442 | |||
443 | raw_spin_lock_init(&perf_iommu->lock); | ||
444 | |||
445 | /* Init format attributes */ | ||
446 | perf_iommu->format_group = &amd_iommu_format_group; | ||
447 | |||
448 | /* Init cpumask attributes to only core 0 */ | ||
449 | cpumask_set_cpu(0, &iommu_cpumask); | ||
450 | perf_iommu->cpumask_group = &amd_iommu_cpumask_group; | ||
451 | |||
452 | /* Init events attributes */ | ||
453 | if (_init_events_attrs(perf_iommu) != 0) | ||
454 | pr_err("perf: amd_iommu: Only raw events are supported.\n"); | ||
455 | |||
456 | /* Init null attributes */ | ||
457 | perf_iommu->null_group = NULL; | ||
458 | perf_iommu->pmu.attr_groups = perf_iommu->attr_groups; | ||
459 | |||
460 | ret = perf_pmu_register(&perf_iommu->pmu, name, -1); | ||
461 | if (ret) { | ||
462 | pr_err("perf: amd_iommu: Failed to initialize.\n"); | ||
463 | amd_iommu_pc_exit(); | ||
464 | } else { | ||
465 | pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n", | ||
466 | amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID), | ||
467 | amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID)); | ||
468 | } | ||
469 | |||
470 | return ret; | ||
471 | } | ||
472 | |||
473 | static struct perf_amd_iommu __perf_iommu = { | ||
474 | .pmu = { | ||
475 | .event_init = perf_iommu_event_init, | ||
476 | .add = perf_iommu_add, | ||
477 | .del = perf_iommu_del, | ||
478 | .start = perf_iommu_start, | ||
479 | .stop = perf_iommu_stop, | ||
480 | .read = perf_iommu_read, | ||
481 | }, | ||
482 | .max_banks = 0x00, | ||
483 | .max_counters = 0x00, | ||
484 | .cntr_assign_mask = 0ULL, | ||
485 | .format_group = NULL, | ||
486 | .cpumask_group = NULL, | ||
487 | .events_group = NULL, | ||
488 | .null_group = NULL, | ||
489 | }; | ||
490 | |||
491 | static __init int amd_iommu_pc_init(void) | ||
492 | { | ||
493 | /* Make sure the IOMMU PC resource is available */ | ||
494 | if (!amd_iommu_pc_supported()) | ||
495 | return -ENODEV; | ||
496 | |||
497 | _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | device_initcall(amd_iommu_pc_init); | ||
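Once registered, the PMU is expected to appear under /sys/bus/event_source/devices/amd_iommu with the events, format and cpumask groups wired up above, so something like `perf stat -e 'amd_iommu/cmd_processed/' -a sleep 1` should drive it; event_init rejects task-bound and sampling events, so only system-wide counting works. A hedged sketch of opening one of the named events directly, assuming the csource format field maps to the low bits of attr.config (the format group is defined earlier in this file, outside this excerpt):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr = {0};
        unsigned long long count;
        int type, fd;
        FILE *f = fopen("/sys/bus/event_source/devices/amd_iommu/type", "r");

        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        attr.type = type;
        attr.size = sizeof(attr);
        attr.config = 0x11; /* cmd_processed: csource=0x11 (assumed low bits) */

        /* pid == -1, cpu == 0: system-wide counting, never task-bound */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
                return 1;
        sleep(1);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("cmd_processed: %llu\n", count);
        return 0;
}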
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.h b/arch/x86/kernel/cpu/perf_event_amd_iommu.h new file mode 100644 index 000000000000..845d173278e3 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Steven Kinney <Steven.Kinney@amd.com> | ||
5 | * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef _PERF_EVENT_AMD_IOMMU_H_ | ||
13 | #define _PERF_EVENT_AMD_IOMMU_H_ | ||
14 | |||
15 | /* iommu pc mmio region register indexes */ | ||
16 | #define IOMMU_PC_COUNTER_REG 0x00 | ||
17 | #define IOMMU_PC_COUNTER_SRC_REG 0x08 | ||
18 | #define IOMMU_PC_PASID_MATCH_REG 0x10 | ||
19 | #define IOMMU_PC_DOMID_MATCH_REG 0x18 | ||
20 | #define IOMMU_PC_DEVID_MATCH_REG 0x20 | ||
21 | #define IOMMU_PC_COUNTER_REPORT_REG 0x28 | ||
22 | |||
23 | /* maximum specified banks/counters */ | ||
24 | #define PC_MAX_SPEC_BNKS 64 | ||
25 | #define PC_MAX_SPEC_CNTRS 16 | ||
26 | |||
27 | /* iommu pc reg masks */ | ||
28 | #define IOMMU_BASE_DEVID 0x0000 | ||
29 | |||
30 | /* amd_iommu_init.c external support functions */ | ||
31 | extern bool amd_iommu_pc_supported(void); | ||
32 | |||
33 | extern u8 amd_iommu_pc_get_max_banks(u16 devid); | ||
34 | |||
35 | extern u8 amd_iommu_pc_get_max_counters(u16 devid); | ||
36 | |||
37 | extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, | ||
38 | u8 fxn, u64 *value, bool is_write); | ||
39 | |||
40 | #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ | ||
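The single get/set accessor declared above serves all six register indexes; a kernel-side read of the live counter value through it would look roughly like this (a sketch built only from the declarations in this header, with the 48-bit mask taken from the driver code above):

/* Sketch: read the current value of one iommu performance counter. */
static int iommu_pc_read_counter(u16 devid, u8 bank, u8 cntr, u64 *val)
{
        int ret;

        *val = 0;
        ret = amd_iommu_pc_get_set_reg_val(devid, bank, cntr,
                                           IOMMU_PC_COUNTER_REG,
                                           val, false); /* is_write = false */
        if (!ret)
                *val &= 0xFFFFFFFFFFFFULL; /* hardware counter is 48 bits */
        return ret;
}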
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index c0c661adf03e..754291adec33 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c | |||
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = { | |||
288 | .read = amd_uncore_read, | 288 | .read = amd_uncore_read, |
289 | }; | 289 | }; |
290 | 290 | ||
291 | static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu) | 291 | static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) |
292 | { | 292 | { |
293 | return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, | 293 | return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, |
294 | cpu_to_node(cpu)); | 294 | cpu_to_node(cpu)); |
295 | } | 295 | } |
296 | 296 | ||
297 | static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) | 297 | static void amd_uncore_cpu_up_prepare(unsigned int cpu) |
298 | { | 298 | { |
299 | struct amd_uncore *uncore; | 299 | struct amd_uncore *uncore; |
300 | 300 | ||
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | static struct amd_uncore * | 324 | static struct amd_uncore * |
325 | __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, | 325 | amd_uncore_find_online_sibling(struct amd_uncore *this, |
326 | struct amd_uncore * __percpu *uncores) | 326 | struct amd_uncore * __percpu *uncores) |
327 | { | 327 | { |
328 | unsigned int cpu; | 328 | unsigned int cpu; |
329 | struct amd_uncore *that; | 329 | struct amd_uncore *that; |
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, | |||
348 | return this; | 348 | return this; |
349 | } | 349 | } |
350 | 350 | ||
351 | static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) | 351 | static void amd_uncore_cpu_starting(unsigned int cpu) |
352 | { | 352 | { |
353 | unsigned int eax, ebx, ecx, edx; | 353 | unsigned int eax, ebx, ecx, edx; |
354 | struct amd_uncore *uncore; | 354 | struct amd_uncore *uncore; |
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) | |||
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
379 | static void __cpuinit uncore_online(unsigned int cpu, | 379 | static void uncore_online(unsigned int cpu, |
380 | struct amd_uncore * __percpu *uncores) | 380 | struct amd_uncore * __percpu *uncores) |
381 | { | 381 | { |
382 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); | 382 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); |
383 | 383 | ||
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu, | |||
388 | cpumask_set_cpu(cpu, uncore->active_mask); | 388 | cpumask_set_cpu(cpu, uncore->active_mask); |
389 | } | 389 | } |
390 | 390 | ||
391 | static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) | 391 | static void amd_uncore_cpu_online(unsigned int cpu) |
392 | { | 392 | { |
393 | if (amd_uncore_nb) | 393 | if (amd_uncore_nb) |
394 | uncore_online(cpu, amd_uncore_nb); | 394 | uncore_online(cpu, amd_uncore_nb); |
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) | |||
397 | uncore_online(cpu, amd_uncore_l2); | 397 | uncore_online(cpu, amd_uncore_l2); |
398 | } | 398 | } |
399 | 399 | ||
400 | static void __cpuinit uncore_down_prepare(unsigned int cpu, | 400 | static void uncore_down_prepare(unsigned int cpu, |
401 | struct amd_uncore * __percpu *uncores) | 401 | struct amd_uncore * __percpu *uncores) |
402 | { | 402 | { |
403 | unsigned int i; | 403 | unsigned int i; |
404 | struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); | 404 | struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); |
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu, | |||
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) | 426 | static void amd_uncore_cpu_down_prepare(unsigned int cpu) |
427 | { | 427 | { |
428 | if (amd_uncore_nb) | 428 | if (amd_uncore_nb) |
429 | uncore_down_prepare(cpu, amd_uncore_nb); | 429 | uncore_down_prepare(cpu, amd_uncore_nb); |
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) | |||
432 | uncore_down_prepare(cpu, amd_uncore_l2); | 432 | uncore_down_prepare(cpu, amd_uncore_l2); |
433 | } | 433 | } |
434 | 434 | ||
435 | static void __cpuinit uncore_dead(unsigned int cpu, | 435 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) |
436 | struct amd_uncore * __percpu *uncores) | ||
437 | { | 436 | { |
438 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); | 437 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); |
439 | 438 | ||
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu, | |||
445 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; | 444 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; |
446 | } | 445 | } |
447 | 446 | ||
448 | static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) | 447 | static void amd_uncore_cpu_dead(unsigned int cpu) |
449 | { | 448 | { |
450 | if (amd_uncore_nb) | 449 | if (amd_uncore_nb) |
451 | uncore_dead(cpu, amd_uncore_nb); | 450 | uncore_dead(cpu, amd_uncore_nb); |
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) | |||
454 | uncore_dead(cpu, amd_uncore_l2); | 453 | uncore_dead(cpu, amd_uncore_l2); |
455 | } | 454 | } |
456 | 455 | ||
457 | static int __cpuinit | 456 | static int |
458 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | 457 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, |
459 | void *hcpu) | 458 | void *hcpu) |
460 | { | 459 | { |
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | |||
489 | return NOTIFY_OK; | 488 | return NOTIFY_OK; |
490 | } | 489 | } |
491 | 490 | ||
492 | static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = { | 491 | static struct notifier_block amd_uncore_cpu_notifier_block = { |
493 | .notifier_call = amd_uncore_cpu_notifier, | 492 | .notifier_call = amd_uncore_cpu_notifier, |
494 | .priority = CPU_PRI_PERF + 1, | 493 | .priority = CPU_PRI_PERF + 1, |
495 | }; | 494 | }; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a9e22073bd56..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/export.h> | 14 | #include <linux/export.h> |
15 | 15 | ||
16 | #include <asm/cpufeature.h> | ||
16 | #include <asm/hardirq.h> | 17 | #include <asm/hardirq.h> |
17 | #include <asm/apic.h> | 18 | #include <asm/apic.h> |
18 | 19 | ||
@@ -190,6 +191,22 @@ struct attribute *snb_events_attrs[] = { | |||
190 | NULL, | 191 | NULL, |
191 | }; | 192 | }; |
192 | 193 | ||
194 | static struct event_constraint intel_hsw_event_constraints[] = { | ||
195 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
196 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
197 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | ||
198 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */ | ||
199 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | ||
200 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | ||
201 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | ||
202 | INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), | ||
203 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | ||
204 | INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), | ||
205 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | ||
206 | INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), | ||
207 | EVENT_CONSTRAINT_END | ||
208 | }; | ||
209 | |||
193 | static u64 intel_pmu_event_map(int hw_event) | 210 | static u64 intel_pmu_event_map(int hw_event) |
194 | { | 211 | { |
195 | return intel_perfmon_event_map[hw_event]; | 212 | return intel_perfmon_event_map[hw_event]; |
@@ -872,7 +889,8 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | |||
872 | return true; | 889 | return true; |
873 | 890 | ||
874 | /* implicit branch sampling to correct PEBS skid */ | 891 | /* implicit branch sampling to correct PEBS skid */ |
875 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) | 892 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 && |
893 | x86_pmu.intel_cap.pebs_format < 2) | ||
876 | return true; | 894 | return true; |
877 | 895 | ||
878 | return false; | 896 | return false; |
@@ -1167,15 +1185,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1167 | cpuc = &__get_cpu_var(cpu_hw_events); | 1185 | cpuc = &__get_cpu_var(cpu_hw_events); |
1168 | 1186 | ||
1169 | /* | 1187 | /* |
1170 | * Some chipsets need to unmask the LVTPC in a particular spot | 1188 | * No known reason to not always do late ACK, |
1171 | * inside the nmi handler. As a result, the unmasking was pushed | 1189 | * but just in case do it opt-in. |
1172 | * into all the nmi handlers. | ||
1173 | * | ||
1174 | * This handler doesn't seem to have any issues with the unmasking | ||
1175 | * so it was left at the top. | ||
1176 | */ | 1190 | */ |
1177 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1191 | if (!x86_pmu.late_ack) |
1178 | 1192 | apic_write(APIC_LVTPC, APIC_DM_NMI); | |
1179 | intel_pmu_disable_all(); | 1193 | intel_pmu_disable_all(); |
1180 | handled = intel_pmu_drain_bts_buffer(); | 1194 | handled = intel_pmu_drain_bts_buffer(); |
1181 | status = intel_pmu_get_status(); | 1195 | status = intel_pmu_get_status(); |
@@ -1188,8 +1202,12 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1188 | again: | 1202 | again: |
1189 | intel_pmu_ack_status(status); | 1203 | intel_pmu_ack_status(status); |
1190 | if (++loops > 100) { | 1204 | if (++loops > 100) { |
1191 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | 1205 | static bool warned = false; |
1192 | perf_event_print_debug(); | 1206 | if (!warned) { |
1207 | WARN(1, "perfevents: irq loop stuck!\n"); | ||
1208 | perf_event_print_debug(); | ||
1209 | warned = true; | ||
1210 | } | ||
1193 | intel_pmu_reset(); | 1211 | intel_pmu_reset(); |
1194 | goto done; | 1212 | goto done; |
1195 | } | 1213 | } |
@@ -1235,6 +1253,13 @@ again: | |||
1235 | 1253 | ||
1236 | done: | 1254 | done: |
1237 | intel_pmu_enable_all(0); | 1255 | intel_pmu_enable_all(0); |
1256 | /* | ||
1257 | * Only unmask the NMI after the overflow counters | ||
1258 | * have been reset. This avoids spurious NMIs on | ||
1259 | * Haswell CPUs. | ||
1260 | */ | ||
1261 | if (x86_pmu.late_ack) | ||
1262 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
1238 | return handled; | 1263 | return handled; |
1239 | } | 1264 | } |
1240 | 1265 | ||
@@ -1425,7 +1450,6 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
1425 | if (x86_pmu.event_constraints) { | 1450 | if (x86_pmu.event_constraints) { |
1426 | for_each_event_constraint(c, x86_pmu.event_constraints) { | 1451 | for_each_event_constraint(c, x86_pmu.event_constraints) { |
1427 | if ((event->hw.config & c->cmask) == c->code) { | 1452 | if ((event->hw.config & c->cmask) == c->code) { |
1428 | /* hw.flags zeroed at initialization */ | ||
1429 | event->hw.flags |= c->flags; | 1453 | event->hw.flags |= c->flags; |
1430 | return c; | 1454 | return c; |
1431 | } | 1455 | } |
@@ -1473,7 +1497,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, | |||
1473 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | 1497 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, |
1474 | struct perf_event *event) | 1498 | struct perf_event *event) |
1475 | { | 1499 | { |
1476 | event->hw.flags = 0; | ||
1477 | intel_put_shared_regs_event_constraints(cpuc, event); | 1500 | intel_put_shared_regs_event_constraints(cpuc, event); |
1478 | } | 1501 | } |
1479 | 1502 | ||
@@ -1646,6 +1669,47 @@ static void core_pmu_enable_all(int added) | |||
1646 | } | 1669 | } |
1647 | } | 1670 | } |
1648 | 1671 | ||
1672 | static int hsw_hw_config(struct perf_event *event) | ||
1673 | { | ||
1674 | int ret = intel_pmu_hw_config(event); | ||
1675 | |||
1676 | if (ret) | ||
1677 | return ret; | ||
1678 | if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) | ||
1679 | return 0; | ||
1680 | event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); | ||
1681 | |||
1682 | /* | ||
1683 | * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with | ||
1684 | * PEBS or in ANY thread mode. Since the results are non-sensical forbid | ||
1685 | * this combination. | ||
1686 | */ | ||
1687 | if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && | ||
1688 | ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || | ||
1689 | event->attr.precise_ip > 0)) | ||
1690 | return -EOPNOTSUPP; | ||
1691 | |||
1692 | return 0; | ||
1693 | } | ||
1694 | |||
1695 | static struct event_constraint counter2_constraint = | ||
1696 | EVENT_CONSTRAINT(0, 0x4, 0); | ||
1697 | |||
1698 | static struct event_constraint * | ||
1699 | hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
1700 | { | ||
1701 | struct event_constraint *c = intel_get_event_constraints(cpuc, event); | ||
1702 | |||
1703 | /* Handle special quirk on in_tx_checkpointed only in counter 2 */ | ||
1704 | if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { | ||
1705 | if (c->idxmsk64 & (1U << 2)) | ||
1706 | return &counter2_constraint; | ||
1707 | return &emptyconstraint; | ||
1708 | } | ||
1709 | |||
1710 | return c; | ||
1711 | } | ||
1712 | |||
1649 | PMU_FORMAT_ATTR(event, "config:0-7" ); | 1713 | PMU_FORMAT_ATTR(event, "config:0-7" ); |
1650 | PMU_FORMAT_ATTR(umask, "config:8-15" ); | 1714 | PMU_FORMAT_ATTR(umask, "config:8-15" ); |
1651 | PMU_FORMAT_ATTR(edge, "config:18" ); | 1715 | PMU_FORMAT_ATTR(edge, "config:18" ); |
@@ -1653,6 +1717,8 @@ PMU_FORMAT_ATTR(pc, "config:19" ); | |||
1653 | PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ | 1717 | PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ |
1654 | PMU_FORMAT_ATTR(inv, "config:23" ); | 1718 | PMU_FORMAT_ATTR(inv, "config:23" ); |
1655 | PMU_FORMAT_ATTR(cmask, "config:24-31" ); | 1719 | PMU_FORMAT_ATTR(cmask, "config:24-31" ); |
1720 | PMU_FORMAT_ATTR(in_tx, "config:32"); | ||
1721 | PMU_FORMAT_ATTR(in_tx_cp, "config:33"); | ||
1656 | 1722 | ||
1657 | static struct attribute *intel_arch_formats_attr[] = { | 1723 | static struct attribute *intel_arch_formats_attr[] = { |
1658 | &format_attr_event.attr, | 1724 | &format_attr_event.attr, |
@@ -1807,6 +1873,8 @@ static struct attribute *intel_arch3_formats_attr[] = { | |||
1807 | &format_attr_any.attr, | 1873 | &format_attr_any.attr, |
1808 | &format_attr_inv.attr, | 1874 | &format_attr_inv.attr, |
1809 | &format_attr_cmask.attr, | 1875 | &format_attr_cmask.attr, |
1876 | &format_attr_in_tx.attr, | ||
1877 | &format_attr_in_tx_cp.attr, | ||
1810 | 1878 | ||
1811 | &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ | 1879 | &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ |
1812 | &format_attr_ldlat.attr, /* PEBS load latency */ | 1880 | &format_attr_ldlat.attr, /* PEBS load latency */ |
@@ -1966,6 +2034,15 @@ static __init void intel_nehalem_quirk(void) | |||
1966 | } | 2034 | } |
1967 | } | 2035 | } |
1968 | 2036 | ||
2037 | EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); | ||
2038 | EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") | ||
2039 | |||
2040 | static struct attribute *hsw_events_attrs[] = { | ||
2041 | EVENT_PTR(mem_ld_hsw), | ||
2042 | EVENT_PTR(mem_st_hsw), | ||
2043 | NULL | ||
2044 | }; | ||
2045 | |||
1969 | __init int intel_pmu_init(void) | 2046 | __init int intel_pmu_init(void) |
1970 | { | 2047 | { |
1971 | union cpuid10_edx edx; | 2048 | union cpuid10_edx edx; |
@@ -2189,6 +2266,31 @@ __init int intel_pmu_init(void) | |||
2189 | break; | 2266 | break; |
2190 | 2267 | ||
2191 | 2268 | ||
2269 | case 60: /* Haswell Client */ | ||
2270 | case 70: | ||
2271 | case 71: | ||
2272 | case 63: | ||
2273 | case 69: | ||
2274 | x86_pmu.late_ack = true; | ||
2275 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | ||
2276 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | ||
2277 | |||
2278 | intel_pmu_lbr_init_snb(); | ||
2279 | |||
2280 | x86_pmu.event_constraints = intel_hsw_event_constraints; | ||
2281 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | ||
2282 | x86_pmu.extra_regs = intel_snb_extra_regs; | ||
2283 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | ||
2284 | /* all extra regs are per-cpu when HT is on */ | ||
2285 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
2286 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | ||
2287 | |||
2288 | x86_pmu.hw_config = hsw_hw_config; | ||
2289 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | ||
2290 | x86_pmu.cpu_events = hsw_events_attrs; | ||
2291 | pr_cont("Haswell events, "); | ||
2292 | break; | ||
2293 | |||
2192 | default: | 2294 | default: |
2193 | switch (x86_pmu.version) { | 2295 | switch (x86_pmu.version) { |
2194 | case 1: | 2296 | case 1: |
@@ -2227,7 +2329,7 @@ __init int intel_pmu_init(void) | |||
2227 | * counter, so do not extend mask to generic counters | 2329 | * counter, so do not extend mask to generic counters |
2228 | */ | 2330 | */ |
2229 | for_each_event_constraint(c, x86_pmu.event_constraints) { | 2331 | for_each_event_constraint(c, x86_pmu.event_constraints) { |
2230 | if (c->cmask != X86_RAW_EVENT_MASK | 2332 | if (c->cmask != FIXED_EVENT_FLAGS |
2231 | || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) { | 2333 | || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) { |
2232 | continue; | 2334 | continue; |
2233 | } | 2335 | } |
@@ -2237,5 +2339,12 @@ __init int intel_pmu_init(void) | |||
2237 | } | 2339 | } |
2238 | } | 2340 | } |
2239 | 2341 | ||
2342 | /* Support full width counters using alternative MSR range */ | ||
2343 | if (x86_pmu.intel_cap.full_width_write) { | ||
2344 | x86_pmu.max_period = x86_pmu.cntval_mask; | ||
2345 | x86_pmu.perfctr = MSR_IA32_PMC0; | ||
2346 | pr_cont("full-width counters, "); | ||
2347 | } | ||
2348 | |||
2240 | return 0; | 2349 | return 0; |
2241 | } | 2350 | } |
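Two of the additions above are worth unpacking. The in_tx/in_tx_cp format attributes extend the raw event encoding past the classic 32 bits, so the HSW_IN_TX and HSW_IN_TX_CHECKPOINTED masks used by hsw_hw_config are presumably bits 32 and 33 of attr.config, matching the "config:32"/"config:33" format strings (their definitions sit outside this diff). A sketch of building such a config:

#include <stdint.h>

/* Assumed values, matching the config:32 / config:33 format strings. */
#define HSW_IN_TX               (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)

/* Build a raw Haswell config: event|umask low, TSX filter bits on top. */
uint64_t hsw_config(uint8_t event, uint8_t umask, int in_tx, int in_tx_cp)
{
        uint64_t config = (uint64_t)event | ((uint64_t)umask << 8);

        if (in_tx)
                config |= HSW_IN_TX;
        if (in_tx_cp)
                config |= HSW_IN_TX_CHECKPOINTED; /* counter 2 only */
        return config;
}

hsw_hw_config then forbids combining these filters with ANY-thread mode or PEBS, and hsw_get_event_constraints steers in_tx_cp events onto counter 2 only, via counter2_constraint. The full-width-counter hunk is similar in spirit: once the alternative MSR_IA32_PMC0 range allows writing all counter bits, max_period can be raised to the full cntval_mask.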
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 60250f687052..3065c57a63c1 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -107,6 +107,19 @@ static u64 precise_store_data(u64 status) | |||
107 | return val; | 107 | return val; |
108 | } | 108 | } |
109 | 109 | ||
110 | static u64 precise_store_data_hsw(u64 status) | ||
111 | { | ||
112 | union perf_mem_data_src dse; | ||
113 | |||
114 | dse.val = 0; | ||
115 | dse.mem_op = PERF_MEM_OP_STORE; | ||
116 | dse.mem_lvl = PERF_MEM_LVL_NA; | ||
117 | if (status & 1) | ||
118 | dse.mem_lvl = PERF_MEM_LVL_L1; | ||
119 | /* Nothing else supported. Sorry. */ | ||
120 | return dse.val; | ||
121 | } | ||
122 | |||
110 | static u64 load_latency_data(u64 status) | 123 | static u64 load_latency_data(u64 status) |
111 | { | 124 | { |
112 | union intel_x86_pebs_dse dse; | 125 | union intel_x86_pebs_dse dse; |
@@ -165,6 +178,22 @@ struct pebs_record_nhm { | |||
165 | u64 status, dla, dse, lat; | 178 | u64 status, dla, dse, lat; |
166 | }; | 179 | }; |
167 | 180 | ||
181 | /* | ||
182 | * Same as pebs_record_nhm, with two additional fields. | ||
183 | */ | ||
184 | struct pebs_record_hsw { | ||
185 | struct pebs_record_nhm nhm; | ||
186 | /* | ||
187 | * Real IP of the event. In the Intel documentation this | ||
188 | * is called eventingrip. | ||
189 | */ | ||
190 | u64 real_ip; | ||
191 | /* | ||
192 | * TSX tuning information field: abort cycles and abort flags. | ||
193 | */ | ||
194 | u64 tsx_tuning; | ||
195 | }; | ||
196 | |||
168 | void init_debug_store_on_cpu(int cpu) | 197 | void init_debug_store_on_cpu(int cpu) |
169 | { | 198 | { |
170 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; | 199 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
@@ -548,6 +577,42 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { | |||
548 | EVENT_CONSTRAINT_END | 577 | EVENT_CONSTRAINT_END |
549 | }; | 578 | }; |
550 | 579 | ||
580 | struct event_constraint intel_hsw_pebs_event_constraints[] = { | ||
581 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | ||
582 | INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | ||
583 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | ||
584 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | ||
585 | INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */ | ||
586 | INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
587 | INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.NEAR_TAKEN */ | ||
588 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.* */ | ||
589 | /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ | ||
590 | INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), | ||
591 | /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ | ||
592 | INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), | ||
593 | INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ | ||
594 | INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ | ||
595 | /* MEM_UOPS_RETIRED.SPLIT_STORES */ | ||
596 | INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), | ||
597 | INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ | ||
598 | INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
599 | INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */ | ||
600 | INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */ | ||
601 | INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L3_HIT */ | ||
602 | /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */ | ||
603 | INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf), | ||
604 | /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */ | ||
605 | INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf), | ||
606 | /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */ | ||
607 | INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf), | ||
608 | /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */ | ||
609 | INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf), | ||
610 | INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf), /* HLE_RETIRED.Abort */ | ||
611 | INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf), /* RTM_RETIRED.Abort */ | ||
612 | |||
613 | EVENT_CONSTRAINT_END | ||
614 | }; | ||
615 | |||
551 | struct event_constraint *intel_pebs_constraints(struct perf_event *event) | 616 | struct event_constraint *intel_pebs_constraints(struct perf_event *event) |
552 | { | 617 | { |
553 | struct event_constraint *c; | 618 | struct event_constraint *c; |
@@ -588,6 +653,12 @@ void intel_pmu_pebs_disable(struct perf_event *event) | |||
588 | struct hw_perf_event *hwc = &event->hw; | 653 | struct hw_perf_event *hwc = &event->hw; |
589 | 654 | ||
590 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); | 655 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); |
656 | |||
657 | if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT) | ||
658 | cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); | ||
659 | else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST) | ||
660 | cpuc->pebs_enabled &= ~(1ULL << 63); | ||
661 | |||
591 | if (cpuc->enabled) | 662 | if (cpuc->enabled) |
592 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 663 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
593 | 664 | ||
@@ -697,6 +768,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
697 | */ | 768 | */ |
698 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 769 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
699 | struct pebs_record_nhm *pebs = __pebs; | 770 | struct pebs_record_nhm *pebs = __pebs; |
771 | struct pebs_record_hsw *pebs_hsw = __pebs; | ||
700 | struct perf_sample_data data; | 772 | struct perf_sample_data data; |
701 | struct pt_regs regs; | 773 | struct pt_regs regs; |
702 | u64 sample_type; | 774 | u64 sample_type; |
@@ -706,7 +778,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
706 | return; | 778 | return; |
707 | 779 | ||
708 | fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; | 780 | fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; |
709 | fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST; | 781 | fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST | |
782 | PERF_X86_EVENT_PEBS_ST_HSW); | ||
710 | 783 | ||
711 | perf_sample_data_init(&data, 0, event->hw.last_period); | 784 | perf_sample_data_init(&data, 0, event->hw.last_period); |
712 | 785 | ||
@@ -717,9 +790,6 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
717 | * if PEBS-LL or PreciseStore | 790 | * if PEBS-LL or PreciseStore |
718 | */ | 791 | */ |
719 | if (fll || fst) { | 792 | if (fll || fst) { |
720 | if (sample_type & PERF_SAMPLE_ADDR) | ||
721 | data.addr = pebs->dla; | ||
722 | |||
723 | /* | 793 | /* |
724 | * Use latency for weight (only avail with PEBS-LL) | 794 | * Use latency for weight (only avail with PEBS-LL) |
725 | */ | 795 | */ |
@@ -732,6 +802,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
732 | if (sample_type & PERF_SAMPLE_DATA_SRC) { | 802 | if (sample_type & PERF_SAMPLE_DATA_SRC) { |
733 | if (fll) | 803 | if (fll) |
734 | data.data_src.val = load_latency_data(pebs->dse); | 804 | data.data_src.val = load_latency_data(pebs->dse); |
805 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) | ||
806 | data.data_src.val = | ||
807 | precise_store_data_hsw(pebs->dse); | ||
735 | else | 808 | else |
736 | data.data_src.val = precise_store_data(pebs->dse); | 809 | data.data_src.val = precise_store_data(pebs->dse); |
737 | } | 810 | } |
@@ -753,11 +826,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
753 | regs.bp = pebs->bp; | 826 | regs.bp = pebs->bp; |
754 | regs.sp = pebs->sp; | 827 | regs.sp = pebs->sp; |
755 | 828 | ||
756 | if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(®s)) | 829 | if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { |
830 | regs.ip = pebs_hsw->real_ip; | ||
831 | regs.flags |= PERF_EFLAGS_EXACT; | ||
832 | } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(®s)) | ||
757 | regs.flags |= PERF_EFLAGS_EXACT; | 833 | regs.flags |= PERF_EFLAGS_EXACT; |
758 | else | 834 | else |
759 | regs.flags &= ~PERF_EFLAGS_EXACT; | 835 | regs.flags &= ~PERF_EFLAGS_EXACT; |
760 | 836 | ||
837 | if ((event->attr.sample_type & PERF_SAMPLE_ADDR) && | ||
838 | x86_pmu.intel_cap.pebs_format >= 1) | ||
839 | data.addr = pebs->dla; | ||
840 | |||
761 | if (has_branch_stack(event)) | 841 | if (has_branch_stack(event)) |
762 | data.br_stack = &cpuc->lbr_stack; | 842 | data.br_stack = &cpuc->lbr_stack; |
763 | 843 | ||
@@ -806,35 +886,22 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
806 | __intel_pmu_pebs_event(event, iregs, at); | 886 | __intel_pmu_pebs_event(event, iregs, at); |
807 | } | 887 | } |
808 | 888 | ||
809 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | 889 | static void __intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, void *at, |
890 | void *top) | ||
810 | { | 891 | { |
811 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 892 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
812 | struct debug_store *ds = cpuc->ds; | 893 | struct debug_store *ds = cpuc->ds; |
813 | struct pebs_record_nhm *at, *top; | ||
814 | struct perf_event *event = NULL; | 894 | struct perf_event *event = NULL; |
815 | u64 status = 0; | 895 | u64 status = 0; |
816 | int bit, n; | 896 | int bit; |
817 | |||
818 | if (!x86_pmu.pebs_active) | ||
819 | return; | ||
820 | |||
821 | at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; | ||
822 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; | ||
823 | 897 | ||
824 | ds->pebs_index = ds->pebs_buffer_base; | 898 | ds->pebs_index = ds->pebs_buffer_base; |
825 | 899 | ||
826 | n = top - at; | 900 | for (; at < top; at += x86_pmu.pebs_record_size) { |
827 | if (n <= 0) | 901 | struct pebs_record_nhm *p = at; |
828 | return; | ||
829 | |||
830 | /* | ||
831 | * Should not happen, we program the threshold at 1 and do not | ||
832 | * set a reset value. | ||
833 | */ | ||
834 | WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n); | ||
835 | 902 | ||
836 | for ( ; at < top; at++) { | 903 | for_each_set_bit(bit, (unsigned long *)&p->status, |
837 | for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) { | 904 | x86_pmu.max_pebs_events) { |
838 | event = cpuc->events[bit]; | 905 | event = cpuc->events[bit]; |
839 | if (!test_bit(bit, cpuc->active_mask)) | 906 | if (!test_bit(bit, cpuc->active_mask)) |
840 | continue; | 907 | continue; |
@@ -857,6 +924,61 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
857 | } | 924 | } |
858 | } | 925 | } |
859 | 926 | ||
927 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | ||
928 | { | ||
929 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
930 | struct debug_store *ds = cpuc->ds; | ||
931 | struct pebs_record_nhm *at, *top; | ||
932 | int n; | ||
933 | |||
934 | if (!x86_pmu.pebs_active) | ||
935 | return; | ||
936 | |||
937 | at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; | ||
938 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; | ||
939 | |||
940 | ds->pebs_index = ds->pebs_buffer_base; | ||
941 | |||
942 | n = top - at; | ||
943 | if (n <= 0) | ||
944 | return; | ||
945 | |||
946 | /* | ||
947 | * Should not happen, we program the threshold at 1 and do not | ||
948 | * set a reset value. | ||
949 | */ | ||
950 | WARN_ONCE(n > x86_pmu.max_pebs_events, | ||
951 | "Unexpected number of pebs records %d\n", n); | ||
952 | |||
953 | return __intel_pmu_drain_pebs_nhm(iregs, at, top); | ||
954 | } | ||
955 | |||
956 | static void intel_pmu_drain_pebs_hsw(struct pt_regs *iregs) | ||
957 | { | ||
958 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
959 | struct debug_store *ds = cpuc->ds; | ||
960 | struct pebs_record_hsw *at, *top; | ||
961 | int n; | ||
962 | |||
963 | if (!x86_pmu.pebs_active) | ||
964 | return; | ||
965 | |||
966 | at = (struct pebs_record_hsw *)(unsigned long)ds->pebs_buffer_base; | ||
967 | top = (struct pebs_record_hsw *)(unsigned long)ds->pebs_index; | ||
968 | |||
969 | n = top - at; | ||
970 | if (n <= 0) | ||
971 | return; | ||
972 | /* | ||
973 | * Should not happen, we program the threshold at 1 and do not | ||
974 | * set a reset value. | ||
975 | */ | ||
976 | WARN_ONCE(n > x86_pmu.max_pebs_events, | ||
977 | "Unexpected number of pebs records %d\n", n); | ||
978 | |||
979 | return __intel_pmu_drain_pebs_nhm(iregs, at, top); | ||
980 | } | ||
981 | |||
860 | /* | 982 | /* |
861 | * BTS, PEBS probe and setup | 983 | * BTS, PEBS probe and setup |
862 | */ | 984 | */ |
@@ -888,6 +1010,12 @@ void intel_ds_init(void) | |||
888 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | 1010 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
889 | break; | 1011 | break; |
890 | 1012 | ||
1013 | case 2: | ||
1014 | pr_cont("PEBS fmt2%c, ", pebs_type); | ||
1015 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw); | ||
1016 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_hsw; | ||
1017 | break; | ||
1018 | |||
891 | default: | 1019 | default: |
892 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); | 1020 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); |
893 | x86_pmu.pebs = 0; | 1021 | x86_pmu.pebs = 0; |
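The restructuring above factors the record walk out of intel_pmu_drain_pebs_nhm so one loop serves both formats: every record begins with the nhm-compatible fields, and the stride is whatever x86_pmu.pebs_record_size was set to in intel_ds_init. The essential pointer walk in isolation (field layout abbreviated):

#include <stddef.h>
#include <stdint.h>

struct pebs_nhm { uint64_t flags, ip, regs[16], status, dla, dse, lat; };
struct pebs_hsw { struct pebs_nhm nhm; uint64_t real_ip, tsx_tuning; };

/*
 * Size-agnostic drain: records of either layout start with the nhm
 * header, so status can be read uniformly while the stride differs.
 */
void drain(char *at, char *top, size_t record_size)
{
        for (; at < top; at += record_size) {
                struct pebs_nhm *p = (struct pebs_nhm *)at;

                (void)p->status; /* consume the overflow bitmap here */
        }
}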
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d978353c939b..d5be06a5005e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c | |||
@@ -12,6 +12,16 @@ enum { | |||
12 | LBR_FORMAT_LIP = 0x01, | 12 | LBR_FORMAT_LIP = 0x01, |
13 | LBR_FORMAT_EIP = 0x02, | 13 | LBR_FORMAT_EIP = 0x02, |
14 | LBR_FORMAT_EIP_FLAGS = 0x03, | 14 | LBR_FORMAT_EIP_FLAGS = 0x03, |
15 | LBR_FORMAT_EIP_FLAGS2 = 0x04, | ||
16 | LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_EIP_FLAGS2, | ||
17 | }; | ||
18 | |||
19 | static enum { | ||
20 | LBR_EIP_FLAGS = 1, | ||
21 | LBR_TSX = 2, | ||
22 | } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = { | ||
23 | [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS, | ||
24 | [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX, | ||
15 | }; | 25 | }; |
16 | 26 | ||
17 | /* | 27 | /* |
@@ -56,6 +66,8 @@ enum { | |||
56 | LBR_FAR) | 66 | LBR_FAR) |
57 | 67 | ||
58 | #define LBR_FROM_FLAG_MISPRED (1ULL << 63) | 68 | #define LBR_FROM_FLAG_MISPRED (1ULL << 63) |
69 | #define LBR_FROM_FLAG_IN_TX (1ULL << 62) | ||
70 | #define LBR_FROM_FLAG_ABORT (1ULL << 61) | ||
59 | 71 | ||
60 | #define for_each_branch_sample_type(x) \ | 72 | #define for_each_branch_sample_type(x) \ |
61 | for ((x) = PERF_SAMPLE_BRANCH_USER; \ | 73 | for ((x) = PERF_SAMPLE_BRANCH_USER; \ |
@@ -81,9 +93,13 @@ enum { | |||
81 | X86_BR_JMP = 1 << 9, /* jump */ | 93 | X86_BR_JMP = 1 << 9, /* jump */ |
82 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ | 94 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ |
83 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ | 95 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ |
96 | X86_BR_ABORT = 1 << 12,/* transaction abort */ | ||
97 | X86_BR_IN_TX = 1 << 13,/* in transaction */ | ||
98 | X86_BR_NO_TX = 1 << 14,/* not in transaction */ | ||
84 | }; | 99 | }; |
85 | 100 | ||
86 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) | 101 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) |
102 | #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX) | ||
87 | 103 | ||
88 | #define X86_BR_ANY \ | 104 | #define X86_BR_ANY \ |
89 | (X86_BR_CALL |\ | 105 | (X86_BR_CALL |\ |
@@ -95,6 +111,7 @@ enum { | |||
95 | X86_BR_JCC |\ | 111 | X86_BR_JCC |\ |
96 | X86_BR_JMP |\ | 112 | X86_BR_JMP |\ |
97 | X86_BR_IRQ |\ | 113 | X86_BR_IRQ |\ |
114 | X86_BR_ABORT |\ | ||
98 | X86_BR_IND_CALL) | 115 | X86_BR_IND_CALL) |
99 | 116 | ||
100 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) | 117 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) |
@@ -270,21 +287,31 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) | |||
270 | 287 | ||
271 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | 288 | for (i = 0; i < x86_pmu.lbr_nr; i++) { |
272 | unsigned long lbr_idx = (tos - i) & mask; | 289 | unsigned long lbr_idx = (tos - i) & mask; |
273 | u64 from, to, mis = 0, pred = 0; | 290 | u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0; |
291 | int skip = 0; | ||
292 | int lbr_flags = lbr_desc[lbr_format]; | ||
274 | 293 | ||
275 | rdmsrl(x86_pmu.lbr_from + lbr_idx, from); | 294 | rdmsrl(x86_pmu.lbr_from + lbr_idx, from); |
276 | rdmsrl(x86_pmu.lbr_to + lbr_idx, to); | 295 | rdmsrl(x86_pmu.lbr_to + lbr_idx, to); |
277 | 296 | ||
278 | if (lbr_format == LBR_FORMAT_EIP_FLAGS) { | 297 | if (lbr_flags & LBR_EIP_FLAGS) { |
279 | mis = !!(from & LBR_FROM_FLAG_MISPRED); | 298 | mis = !!(from & LBR_FROM_FLAG_MISPRED); |
280 | pred = !mis; | 299 | pred = !mis; |
281 | from = (u64)((((s64)from) << 1) >> 1); | 300 | skip = 1; |
301 | } | ||
302 | if (lbr_flags & LBR_TSX) { | ||
303 | in_tx = !!(from & LBR_FROM_FLAG_IN_TX); | ||
304 | abort = !!(from & LBR_FROM_FLAG_ABORT); | ||
305 | skip = 3; | ||
282 | } | 306 | } |
307 | from = (u64)((((s64)from) << skip) >> skip); | ||
283 | 308 | ||
284 | cpuc->lbr_entries[i].from = from; | 309 | cpuc->lbr_entries[i].from = from; |
285 | cpuc->lbr_entries[i].to = to; | 310 | cpuc->lbr_entries[i].to = to; |
286 | cpuc->lbr_entries[i].mispred = mis; | 311 | cpuc->lbr_entries[i].mispred = mis; |
287 | cpuc->lbr_entries[i].predicted = pred; | 312 | cpuc->lbr_entries[i].predicted = pred; |
313 | cpuc->lbr_entries[i].in_tx = in_tx; | ||
314 | cpuc->lbr_entries[i].abort = abort; | ||
288 | cpuc->lbr_entries[i].reserved = 0; | 315 | cpuc->lbr_entries[i].reserved = 0; |
289 | } | 316 | } |
290 | cpuc->lbr_stack.nr = i; | 317 | cpuc->lbr_stack.nr = i; |
@@ -310,7 +337,7 @@ void intel_pmu_lbr_read(void) | |||
310 | * - in case there is no HW filter | 337 | * - in case there is no HW filter |
311 | * - in case the HW filter has errata or limitations | 338 | * - in case the HW filter has errata or limitations |
312 | */ | 339 | */ |
313 | static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | 340 | static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) |
314 | { | 341 | { |
315 | u64 br_type = event->attr.branch_sample_type; | 342 | u64 br_type = event->attr.branch_sample_type; |
316 | int mask = 0; | 343 | int mask = 0; |
@@ -318,11 +345,8 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | |||
318 | if (br_type & PERF_SAMPLE_BRANCH_USER) | 345 | if (br_type & PERF_SAMPLE_BRANCH_USER) |
319 | mask |= X86_BR_USER; | 346 | mask |= X86_BR_USER; |
320 | 347 | ||
321 | if (br_type & PERF_SAMPLE_BRANCH_KERNEL) { | 348 | if (br_type & PERF_SAMPLE_BRANCH_KERNEL) |
322 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | ||
323 | return -EACCES; | ||
324 | mask |= X86_BR_KERNEL; | 349 | mask |= X86_BR_KERNEL; |
325 | } | ||
326 | 350 | ||
327 | /* we ignore BRANCH_HV here */ | 351 | /* we ignore BRANCH_HV here */ |
328 | 352 | ||
@@ -337,13 +361,21 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | |||
337 | 361 | ||
338 | if (br_type & PERF_SAMPLE_BRANCH_IND_CALL) | 362 | if (br_type & PERF_SAMPLE_BRANCH_IND_CALL) |
339 | mask |= X86_BR_IND_CALL; | 363 | mask |= X86_BR_IND_CALL; |
364 | |||
365 | if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX) | ||
366 | mask |= X86_BR_ABORT; | ||
367 | |||
368 | if (br_type & PERF_SAMPLE_BRANCH_IN_TX) | ||
369 | mask |= X86_BR_IN_TX; | ||
370 | |||
371 | if (br_type & PERF_SAMPLE_BRANCH_NO_TX) | ||
372 | mask |= X86_BR_NO_TX; | ||
373 | |||
340 | /* | 374 | /* |
341 | * stash actual user request into reg, it may | 375 | * stash actual user request into reg, it may |
342 | * be used by fixup code for some CPU | 376 | * be used by fixup code for some CPU |
343 | */ | 377 | */ |
344 | event->hw.branch_reg.reg = mask; | 378 | event->hw.branch_reg.reg = mask; |
345 | |||
346 | return 0; | ||
347 | } | 379 | } |
348 | 380 | ||
349 | /* | 381 | /* |
@@ -391,9 +423,7 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) | |||
391 | /* | 423 | /* |
392 | * setup SW LBR filter | 424 | * setup SW LBR filter |
393 | */ | 425 | */ |
394 | ret = intel_pmu_setup_sw_lbr_filter(event); | 426 | intel_pmu_setup_sw_lbr_filter(event); |
395 | if (ret) | ||
396 | return ret; | ||
397 | 427 | ||
398 | /* | 428 | /* |
399 | * setup HW LBR filter, if any | 429 | * setup HW LBR filter, if any |
@@ -415,7 +445,7 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) | |||
415 | * decoded (e.g., text page not present), then X86_BR_NONE is | 445 | * decoded (e.g., text page not present), then X86_BR_NONE is |
416 | * returned. | 446 | * returned. |
417 | */ | 447 | */ |
418 | static int branch_type(unsigned long from, unsigned long to) | 448 | static int branch_type(unsigned long from, unsigned long to, int abort) |
419 | { | 449 | { |
420 | struct insn insn; | 450 | struct insn insn; |
421 | void *addr; | 451 | void *addr; |
@@ -435,6 +465,9 @@ static int branch_type(unsigned long from, unsigned long to) | |||
435 | if (from == 0 || to == 0) | 465 | if (from == 0 || to == 0) |
436 | return X86_BR_NONE; | 466 | return X86_BR_NONE; |
437 | 467 | ||
468 | if (abort) | ||
469 | return X86_BR_ABORT | to_plm; | ||
470 | |||
438 | if (from_plm == X86_BR_USER) { | 471 | if (from_plm == X86_BR_USER) { |
439 | /* | 472 | /* |
440 | * can happen if measuring at the user level only | 473 | * can happen if measuring at the user level only |
@@ -581,7 +614,13 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) | |||
581 | from = cpuc->lbr_entries[i].from; | 614 | from = cpuc->lbr_entries[i].from; |
582 | to = cpuc->lbr_entries[i].to; | 615 | to = cpuc->lbr_entries[i].to; |
583 | 616 | ||
584 | type = branch_type(from, to); | 617 | type = branch_type(from, to, cpuc->lbr_entries[i].abort); |
618 | if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) { | ||
619 | if (cpuc->lbr_entries[i].in_tx) | ||
620 | type |= X86_BR_IN_TX; | ||
621 | else | ||
622 | type |= X86_BR_NO_TX; | ||
623 | } | ||
585 | 624 | ||
586 | /* if type does not correspond, then discard */ | 625 | /* if type does not correspond, then discard */ |
587 | if (type == X86_BR_NONE || (br_sel & type) != type) { | 626 | if (type == X86_BR_NONE || (br_sel & type) != type) { |
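The skip arithmetic in intel_pmu_lbr_read_64 above generalizes the old single-bit strip: EIP_FLAGS formats keep one flag (mispred) in bit 63, the TSX-aware format adds in_tx and abort in bits 62 and 61, and the left/right arithmetic-shift pair both clears the flag bits and sign-extends the remaining canonical address. A worked example:

#include <stdint.h>
#include <stdio.h>

/*
 * Strip the top `skip` flag bits from an LBR "from" value while
 * sign-extending what remains; x86-64 addresses are canonical, so
 * bit 60 reliably replicates into the stripped positions.
 */
uint64_t lbr_from_addr(uint64_t from, int skip)
{
        return (uint64_t)((int64_t)(from << skip) >> skip);
}

int main(void)
{
        /* kernel address with mispred (bit 63) and abort (bit 61) set */
        uint64_t stored = 0xBFFF888812345678ULL;

        printf("%#llx\n", (unsigned long long)lbr_from_addr(stored, 3));
        /* prints 0xffff888812345678: flags gone, address recovered */
        return 0;
}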
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 52441a2af538..1fb6c72717bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { | |||
314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | 314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { |
315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | 315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), |
316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | 316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), |
317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), | 317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), |
318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), | 318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), |
319 | { /* end: all zeroes */ }, | 319 | { /* end: all zeroes */ }, |
320 | }; | 320 | }; |
321 | 321 | ||
@@ -536,7 +536,7 @@ __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *eve | |||
536 | if (!uncore_box_is_fake(box)) | 536 | if (!uncore_box_is_fake(box)) |
537 | reg1->alloc |= alloc; | 537 | reg1->alloc |= alloc; |
538 | 538 | ||
539 | return 0; | 539 | return NULL; |
540 | fail: | 540 | fail: |
541 | for (; i >= 0; i--) { | 541 | for (; i >= 0; i--) { |
542 | if (alloc & (0x1 << i)) | 542 | if (alloc & (0x1 << i)) |
@@ -644,7 +644,7 @@ snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | |||
644 | (!uncore_box_is_fake(box) && reg1->alloc)) | 644 | (!uncore_box_is_fake(box) && reg1->alloc)) |
645 | return NULL; | 645 | return NULL; |
646 | again: | 646 | again: |
647 | mask = 0xff << (idx * 8); | 647 | mask = 0xffULL << (idx * 8); |
648 | raw_spin_lock_irqsave(&er->lock, flags); | 648 | raw_spin_lock_irqsave(&er->lock, flags); |
649 | if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || | 649 | if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || |
650 | !((config1 ^ er->config) & mask)) { | 650 | !((config1 ^ er->config) & mask)) { |
@@ -1923,7 +1923,7 @@ static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modif | |||
1923 | { | 1923 | { |
1924 | struct hw_perf_event *hwc = &event->hw; | 1924 | struct hw_perf_event *hwc = &event->hw; |
1925 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | 1925 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
1926 | int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | 1926 | u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); |
1927 | u64 config = reg1->config; | 1927 | u64 config = reg1->config; |
1928 | 1928 | ||
1929 | /* get the non-shared control bits and shift them */ | 1929 | /* get the non-shared control bits and shift them */ |
@@ -2723,15 +2723,16 @@ static void uncore_put_event_constraint(struct intel_uncore_box *box, struct per | |||
2723 | static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) | 2723 | static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) |
2724 | { | 2724 | { |
2725 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; | 2725 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; |
2726 | struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; | 2726 | struct event_constraint *c; |
2727 | int i, wmin, wmax, ret = 0; | 2727 | int i, wmin, wmax, ret = 0; |
2728 | struct hw_perf_event *hwc; | 2728 | struct hw_perf_event *hwc; |
2729 | 2729 | ||
2730 | bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); | 2730 | bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); |
2731 | 2731 | ||
2732 | for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { | 2732 | for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { |
2733 | hwc = &box->event_list[i]->hw; | ||
2733 | c = uncore_get_event_constraint(box, box->event_list[i]); | 2734 | c = uncore_get_event_constraint(box, box->event_list[i]); |
2734 | constraints[i] = c; | 2735 | hwc->constraint = c; |
2735 | wmin = min(wmin, c->weight); | 2736 | wmin = min(wmin, c->weight); |
2736 | wmax = max(wmax, c->weight); | 2737 | wmax = max(wmax, c->weight); |
2737 | } | 2738 | } |
@@ -2739,7 +2740,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int | |||
2739 | /* fastpath, try to reuse previous register */ | 2740 | /* fastpath, try to reuse previous register */ |
2740 | for (i = 0; i < n; i++) { | 2741 | for (i = 0; i < n; i++) { |
2741 | hwc = &box->event_list[i]->hw; | 2742 | hwc = &box->event_list[i]->hw; |
2742 | c = constraints[i]; | 2743 | c = hwc->constraint; |
2743 | 2744 | ||
2744 | /* never assigned */ | 2745 | /* never assigned */ |
2745 | if (hwc->idx == -1) | 2746 | if (hwc->idx == -1) |
@@ -2759,7 +2760,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int | |||
2759 | } | 2760 | } |
2760 | /* slow path */ | 2761 | /* slow path */ |
2761 | if (i != n) | 2762 | if (i != n) |
2762 | ret = perf_assign_events(constraints, n, wmin, wmax, assign); | 2763 | ret = perf_assign_events(box->event_list, n, |
2764 | wmin, wmax, assign); | ||
2763 | 2765 | ||
2764 | if (!assign || ret) { | 2766 | if (!assign || ret) { |
2765 | for (i = 0; i < n; i++) | 2767 | for (i = 0; i < n; i++) |
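In the hunk above, the per-event constraint moves from a function-local constraints[] array onto the event itself (hwc->constraint), which is what lets the slow path hand perf_assign_events() the event list directly instead of a parallel array. A minimal userspace C sketch of that caching pattern; all types and names here are illustrative, not kernel API:

    #include <stdio.h>

    struct constraint { int weight; };
    struct event {
        int id;
        struct constraint *constraint;  /* cached here instead of constraints[i] */
    };

    static struct constraint light = { 1 }, heavy = { 4 };

    static struct constraint *get_constraint(struct event *e)
    {
        return e->id % 2 ? &heavy : &light;
    }

    int main(void)
    {
        struct event evs[3] = { { 0 }, { 1 }, { 2 } };
        struct event *list[] = { &evs[0], &evs[1], &evs[2] };
        int wmin = 99, wmax = 0;

        for (int i = 0; i < 3; i++) {
            struct constraint *c = get_constraint(list[i]);
            list[i]->constraint = c;              /* was: constraints[i] = c */
            if (c->weight < wmin) wmin = c->weight;
            if (c->weight > wmax) wmax = c->weight;
        }
        printf("wmin=%d wmax=%d\n", wmin, wmax);  /* prints wmin=1 wmax=4 */
        return 0;
    }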
@@ -3295,7 +3297,7 @@ static void __init uncore_pci_exit(void) | |||
3295 | /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ | 3297 | /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ |
3296 | static LIST_HEAD(boxes_to_free); | 3298 | static LIST_HEAD(boxes_to_free); |
3297 | 3299 | ||
3298 | static void __cpuinit uncore_kfree_boxes(void) | 3300 | static void uncore_kfree_boxes(void) |
3299 | { | 3301 | { |
3300 | struct intel_uncore_box *box; | 3302 | struct intel_uncore_box *box; |
3301 | 3303 | ||
@@ -3307,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void) | |||
3307 | } | 3309 | } |
3308 | } | 3310 | } |
3309 | 3311 | ||
3310 | static void __cpuinit uncore_cpu_dying(int cpu) | 3312 | static void uncore_cpu_dying(int cpu) |
3311 | { | 3313 | { |
3312 | struct intel_uncore_type *type; | 3314 | struct intel_uncore_type *type; |
3313 | struct intel_uncore_pmu *pmu; | 3315 | struct intel_uncore_pmu *pmu; |
@@ -3326,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu) | |||
3326 | } | 3328 | } |
3327 | } | 3329 | } |
3328 | 3330 | ||
3329 | static int __cpuinit uncore_cpu_starting(int cpu) | 3331 | static int uncore_cpu_starting(int cpu) |
3330 | { | 3332 | { |
3331 | struct intel_uncore_type *type; | 3333 | struct intel_uncore_type *type; |
3332 | struct intel_uncore_pmu *pmu; | 3334 | struct intel_uncore_pmu *pmu; |
@@ -3369,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu) | |||
3369 | return 0; | 3371 | return 0; |
3370 | } | 3372 | } |
3371 | 3373 | ||
3372 | static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | 3374 | static int uncore_cpu_prepare(int cpu, int phys_id) |
3373 | { | 3375 | { |
3374 | struct intel_uncore_type *type; | 3376 | struct intel_uncore_type *type; |
3375 | struct intel_uncore_pmu *pmu; | 3377 | struct intel_uncore_pmu *pmu; |
@@ -3395,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | |||
3395 | return 0; | 3397 | return 0; |
3396 | } | 3398 | } |
3397 | 3399 | ||
3398 | static void __cpuinit | 3400 | static void |
3399 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) | 3401 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) |
3400 | { | 3402 | { |
3401 | struct intel_uncore_type *type; | 3403 | struct intel_uncore_type *type; |
@@ -3433,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c | |||
3433 | } | 3435 | } |
3434 | } | 3436 | } |
3435 | 3437 | ||
3436 | static void __cpuinit uncore_event_exit_cpu(int cpu) | 3438 | static void uncore_event_exit_cpu(int cpu) |
3437 | { | 3439 | { |
3438 | int i, phys_id, target; | 3440 | int i, phys_id, target; |
3439 | 3441 | ||
@@ -3461,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu) | |||
3461 | uncore_change_context(pci_uncores, cpu, target); | 3463 | uncore_change_context(pci_uncores, cpu, target); |
3462 | } | 3464 | } |
3463 | 3465 | ||
3464 | static void __cpuinit uncore_event_init_cpu(int cpu) | 3466 | static void uncore_event_init_cpu(int cpu) |
3465 | { | 3467 | { |
3466 | int i, phys_id; | 3468 | int i, phys_id; |
3467 | 3469 | ||
@@ -3477,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu) | |||
3477 | uncore_change_context(pci_uncores, -1, cpu); | 3479 | uncore_change_context(pci_uncores, -1, cpu); |
3478 | } | 3480 | } |
3479 | 3481 | ||
3480 | static int | 3482 | static int uncore_cpu_notifier(struct notifier_block *self, |
3481 | __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 3483 | unsigned long action, void *hcpu) |
3482 | { | 3484 | { |
3483 | unsigned int cpu = (long)hcpu; | 3485 | unsigned int cpu = (long)hcpu; |
3484 | 3486 | ||
@@ -3518,7 +3520,7 @@ static int | |||
3518 | return NOTIFY_OK; | 3520 | return NOTIFY_OK; |
3519 | } | 3521 | } |
3520 | 3522 | ||
3521 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { | 3523 | static struct notifier_block uncore_cpu_nb = { |
3522 | .notifier_call = uncore_cpu_notifier, | 3524 | .notifier_call = uncore_cpu_notifier, |
3523 | /* | 3525 | /* |
3524 | * to migrate uncore events, our notifier should be executed | 3526 | * to migrate uncore events, our notifier should be executed |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index f9528917f6e8..47b3d00c9d89 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -337,10 +337,10 @@ | |||
337 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | 337 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) |
338 | 338 | ||
339 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) | 339 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) |
340 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) | 340 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) |
341 | 341 | ||
342 | #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) | 342 | #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) |
343 | #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n))) | 343 | #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * use bits 9~13 to select the event. If the 7th bit is not set, | 346 | * use bits 9~13 to select the event. If the 7th bit is not set, |
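Both the 0xffULL mask in snbep_pcu_get_constraint() earlier and the 0x7ULL FVC event masks in this header fix the same pitfall: a plain hexadecimal constant has type int, so once the shift count reaches 32 the expression is undefined and cannot form the upper-half bits of a u64 mask. A standalone sketch, assuming a 32-bit int:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* With a plain int constant, shifting 0x7 by 32 or more bits is
         * undefined behaviour; the ULL suffix widens the constant to 64
         * bits before the shift, so every mask below comes out correct. */
        for (int n = 0; n < 10; n++) {
            uint64_t mask = 0x7ULL << (11 + 3 * n);
            printf("n=%d mask=%016llx\n", n, (unsigned long long)mask);
        }
        return 0;
    }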
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c index 7b3fe56b1c21..31f0f335ed22 100644 --- a/arch/x86/kernel/cpu/powerflags.c +++ b/arch/x86/kernel/cpu/powerflags.c | |||
@@ -11,10 +11,10 @@ const char *const x86_power_flags[32] = { | |||
11 | "fid", /* frequency id control */ | 11 | "fid", /* frequency id control */ |
12 | "vid", /* voltage id control */ | 12 | "vid", /* voltage id control */ |
13 | "ttp", /* thermal trip */ | 13 | "ttp", /* thermal trip */ |
14 | "tm", | 14 | "tm", /* hardware thermal control */ |
15 | "stc", | 15 | "stc", /* software thermal control */ |
16 | "100mhzsteps", | 16 | "100mhzsteps", /* 100 MHz multiplier control */ |
17 | "hwpstate", | 17 | "hwpstate", /* hardware P-state control */ |
18 | "", /* tsc invariant mapped to constant_tsc */ | 18 | "", /* tsc invariant mapped to constant_tsc */ |
19 | "cpb", /* core performance boost */ | 19 | "cpb", /* core performance boost */ |
20 | "eff_freq_ro", /* Readonly aperf/mperf */ | 20 | "eff_freq_ro", /* Readonly aperf/mperf */ |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 37a198bd48c8..aee6317b902f 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -37,8 +37,8 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
37 | static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no", | 37 | static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no", |
38 | static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no", | 38 | static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no", |
39 | static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no", | 39 | static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no", |
40 | c->hard_math ? "yes" : "no", | 40 | static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no", |
41 | c->hard_math ? "yes" : "no", | 41 | static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no", |
42 | c->cpuid_level, | 42 | c->cpuid_level, |
43 | c->wp_works_ok ? "yes" : "no"); | 43 | c->wp_works_ok ? "yes" : "no"); |
44 | } | 44 | } |
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index feca286c2bb4..88db010845cb 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c | |||
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v) | |||
52 | */ | 52 | */ |
53 | #define RESEED_LOOP ((512*128)/sizeof(unsigned long)) | 53 | #define RESEED_LOOP ((512*128)/sizeof(unsigned long)) |
54 | 54 | ||
55 | void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) | 55 | void x86_init_rdrand(struct cpuinfo_x86 *c) |
56 | { | 56 | { |
57 | #ifdef CONFIG_ARCH_RANDOM | 57 | #ifdef CONFIG_ARCH_RANDOM |
58 | unsigned long tmp; | 58 | unsigned long tmp; |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index d92b5dad15dd..f2cc63e9cf08 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -24,13 +24,13 @@ enum cpuid_regs { | |||
24 | CR_EBX | 24 | CR_EBX |
25 | }; | 25 | }; |
26 | 26 | ||
27 | void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | 27 | void init_scattered_cpuid_features(struct cpuinfo_x86 *c) |
28 | { | 28 | { |
29 | u32 max_level; | 29 | u32 max_level; |
30 | u32 regs[4]; | 30 | u32 regs[4]; |
31 | const struct cpuid_bit *cb; | 31 | const struct cpuid_bit *cb; |
32 | 32 | ||
33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 33 | static const struct cpuid_bit cpuid_bits[] = { |
34 | { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, | 34 | { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, |
35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 4397e987a1cf..4c60eaf0571c 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c | |||
@@ -26,7 +26,7 @@ | |||
26 | * exists, use it for populating initial_apicid and cpu topology | 26 | * exists, use it for populating initial_apicid and cpu topology |
27 | * detection. | 27 | * detection. |
28 | */ | 28 | */ |
29 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | 29 | void detect_extended_topology(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | unsigned int eax, ebx, ecx, edx, sub_index; | 32 | unsigned int eax, ebx, ecx, edx, sub_index; |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 28000743bbb0..aa0430d69b90 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | #include "cpu.h" | 6 | #include "cpu.h" |
7 | 7 | ||
8 | static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | 8 | static void early_init_transmeta(struct cpuinfo_x86 *c) |
9 | { | 9 | { |
10 | u32 xlvl; | 10 | u32 xlvl; |
11 | 11 | ||
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | |||
17 | } | 17 | } |
18 | } | 18 | } |
19 | 19 | ||
20 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | 20 | static void init_transmeta(struct cpuinfo_x86 *c) |
21 | { | 21 | { |
22 | unsigned int cap_mask, uk, max, dummy; | 22 | unsigned int cap_mask, uk, max, dummy; |
23 | unsigned int cms_rev1, cms_rev2; | 23 | unsigned int cms_rev1, cms_rev2; |
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
98 | #endif | 98 | #endif |
99 | } | 99 | } |
100 | 100 | ||
101 | static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { | 101 | static const struct cpu_dev transmeta_cpu_dev = { |
102 | .c_vendor = "Transmeta", | 102 | .c_vendor = "Transmeta", |
103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
104 | .c_early_init = early_init_transmeta, | 104 | .c_early_init = early_init_transmeta, |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index fd2c37bf7acb..202759a14121 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * so no special init takes place. | 8 | * so no special init takes place. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | static const struct cpu_dev __cpuinitconst umc_cpu_dev = { | 11 | static const struct cpu_dev umc_cpu_dev = { |
12 | .c_vendor = "UMC", | 12 | .c_vendor = "UMC", |
13 | .c_ident = { "UMC UMC UMC" }, | 13 | .c_ident = { "UMC UMC UMC" }, |
14 | .c_models = { | 14 | .c_models = { |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 03a36321ec54..7076878404ec 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void) | |||
122 | * so that the kernel could just trust the hypervisor with providing a | 122 | * so that the kernel could just trust the hypervisor with providing a |
123 | * reliable virtual TSC that is suitable for timekeeping. | 123 | * reliable virtual TSC that is suitable for timekeeping. |
124 | */ | 124 | */ |
125 | static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) | 125 | static void vmware_set_cpu_features(struct cpuinfo_x86 *c) |
126 | { | 126 | { |
127 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 127 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | 128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 1e4dbcfe6d31..7d9481c743f8 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = { | |||
137 | .open = cpuid_open, | 137 | .open = cpuid_open, |
138 | }; | 138 | }; |
139 | 139 | ||
140 | static __cpuinit int cpuid_device_create(int cpu) | 140 | static int cpuid_device_create(int cpu) |
141 | { | 141 | { |
142 | struct device *dev; | 142 | struct device *dev; |
143 | 143 | ||
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu) | |||
151 | device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); | 151 | device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); |
152 | } | 152 | } |
153 | 153 | ||
154 | static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, | 154 | static int cpuid_class_cpu_callback(struct notifier_block *nfb, |
155 | unsigned long action, | 155 | unsigned long action, void *hcpu) |
156 | void *hcpu) | ||
157 | { | 156 | { |
158 | unsigned int cpu = (unsigned long)hcpu; | 157 | unsigned int cpu = (unsigned long)hcpu; |
159 | int err = 0; | 158 | int err = 0; |
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index b1581527a236..69eb2fa25494 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c | |||
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev) | |||
133 | { | 133 | { |
134 | } | 134 | } |
135 | 135 | ||
136 | void __cpuinit x86_of_pci_init(void) | 136 | void x86_of_pci_init(void) |
137 | { | 137 | { |
138 | pcibios_enable_irq = x86_of_pci_irq_enable; | 138 | pcibios_enable_irq = x86_of_pci_irq_enable; |
139 | pcibios_disable_irq = x86_of_pci_irq_disable; | 139 | pcibios_disable_irq = x86_of_pci_irq_disable; |
@@ -364,9 +364,7 @@ static void dt_add_ioapic_domain(unsigned int ioapic_num, | |||
364 | * and assigned so we can keep the 1:1 mapping that the ioapic | 364 | * and assigned so we can keep the 1:1 mapping that the ioapic |
365 | * already has. | 365 | * already has. |
366 | */ | 366 | */ |
367 | ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY); | 367 | irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY); |
368 | if (ret) | ||
369 | pr_err("Error mapping legacy IRQs: %d\n", ret); | ||
370 | 368 | ||
371 | if (num > NR_IRQS_LEGACY) { | 369 | if (num > NR_IRQS_LEGACY) { |
372 | ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY, | 370 | ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY, |
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault.c index 155a13f33ed8..5d3fe8d36e4a 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/desc.h> | 10 | #include <asm/desc.h> |
11 | 11 | ||
12 | #ifdef CONFIG_X86_32 | ||
13 | |||
12 | #define DOUBLEFAULT_STACKSIZE (1024) | 14 | #define DOUBLEFAULT_STACKSIZE (1024) |
13 | static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; | 15 | static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; |
14 | #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) | 16 | #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) |
@@ -67,3 +69,16 @@ struct tss_struct doublefault_tss __cacheline_aligned = { | |||
67 | .__cr3 = __pa_nodebug(swapper_pg_dir), | 69 | .__cr3 = __pa_nodebug(swapper_pg_dir), |
68 | } | 70 | } |
69 | }; | 71 | }; |
72 | |||
73 | /* dummy for do_double_fault() call */ | ||
74 | void df_debug(struct pt_regs *regs, long error_code) {} | ||
75 | |||
76 | #else /* !CONFIG_X86_32 */ | ||
77 | |||
78 | void df_debug(struct pt_regs *regs, long error_code) | ||
79 | { | ||
80 | pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code); | ||
81 | show_regs(regs); | ||
82 | panic("Machine halted."); | ||
83 | } | ||
84 | #endif | ||
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 94ab6b90dd3f..63bdb29b2549 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
196 | static void __init intel_remapping_check(int num, int slot, int func) | 196 | static void __init intel_remapping_check(int num, int slot, int func) |
197 | { | 197 | { |
198 | u8 revision; | 198 | u8 revision; |
199 | u16 device; | ||
199 | 200 | ||
201 | device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); | ||
200 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); | 202 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); |
201 | 203 | ||
202 | /* | 204 | /* |
203 | * Revision 0x13 of this chipset supports irq remapping | 205 | * Revision 0x13 of every device matched by this quirk has a |
204 | * but has an erratum that breaks its behavior, flag it as such | 206 | * problem draining interrupts when irq remapping is enabled, |
207 | * and should be flagged as broken. Additionally, revisions 0x12 | ||
208 | * and 0x22 of device id 0x3405 have this problem. | ||
205 | */ | 209 | */ |
206 | if (revision == 0x13) | 210 | if (revision == 0x13) |
207 | set_irq_remapping_broken(); | 211 | set_irq_remapping_broken(); |
212 | else if ((device == 0x3405) && | ||
213 | ((revision == 0x12) || | ||
214 | (revision == 0x22))) | ||
215 | set_irq_remapping_broken(); | ||
208 | 216 | ||
209 | } | 217 | } |
210 | 218 | ||
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = { | |||
239 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, | 247 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, |
240 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, | 248 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, |
241 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 249 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
250 | { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST, | ||
251 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | ||
242 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, | 252 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, |
243 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 253 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
244 | {} | 254 | {} |
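With the new device-id read, the quirk's predicate broadens from "revision 0x13 anywhere" to also cover two revisions of device 0x3405, and the table entry added above routes that device into the check. A userspace sketch of the resulting match logic, with the constants taken from the hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool remapping_broken(uint16_t device, uint8_t revision)
    {
        if (revision == 0x13)
            return true;
        return device == 0x3405 && (revision == 0x12 || revision == 0x22);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               remapping_broken(0x3403, 0x13),   /* 1: rev 0x13 on any device */
               remapping_broken(0x3405, 0x22),   /* 1: new 0x3405 erratum rev */
               remapping_broken(0x3406, 0x12));  /* 0: unaffected combination */
        return 0;
    }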
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 8f3e2dec1df3..2cfbc3a3a2dd 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -801,7 +801,17 @@ ENTRY(name) \ | |||
801 | CFI_ENDPROC; \ | 801 | CFI_ENDPROC; \ |
802 | ENDPROC(name) | 802 | ENDPROC(name) |
803 | 803 | ||
804 | #define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name) | 804 | |
805 | #ifdef CONFIG_TRACING | ||
806 | #define TRACE_BUILD_INTERRUPT(name, nr) \ | ||
807 | BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name) | ||
808 | #else | ||
809 | #define TRACE_BUILD_INTERRUPT(name, nr) | ||
810 | #endif | ||
811 | |||
812 | #define BUILD_INTERRUPT(name, nr) \ | ||
813 | BUILD_INTERRUPT3(name, nr, smp_##name); \ | ||
814 | TRACE_BUILD_INTERRUPT(name, nr) | ||
805 | 815 | ||
806 | /* The include is where all of the SMP etc. interrupts come from */ | 816 | /* The include is where all of the SMP etc. interrupts come from */ |
807 | #include <asm/entry_arch.h> | 817 | #include <asm/entry_arch.h> |
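The effect of the macro change above is that one BUILD_INTERRUPT invocation now emits two entry stubs when CONFIG_TRACING is set: the usual one entering smp_##name and a trace_##name twin entering smp_trace_##name. A userspace C sketch of the same token-pasting pattern (names illustrative, not kernel API; compile with and without -DCONFIG_TRACING):

    #include <stdio.h>

    #define HANDLER3(name, fn) void name(void) { fn(); }

    #ifdef CONFIG_TRACING
    #define TRACE_HANDLER(name) HANDLER3(trace_##name, smp_trace_##name)
    #else
    #define TRACE_HANDLER(name)
    #endif

    /* One invocation defines the plain stub and, if tracing is
     * compiled in, its trace_* twin. */
    #define HANDLER(name) HANDLER3(name, smp_##name) TRACE_HANDLER(name)

    static void smp_demo(void)       { puts("plain handler"); }
    static void smp_trace_demo(void) { puts("traced handler"); }

    HANDLER(demo)

    int main(void)
    {
        demo();
    #ifdef CONFIG_TRACING
        trace_demo();
    #else
        (void)smp_trace_demo;   /* unused in the untraced build */
    #endif
        return 0;
    }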
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 727208941030..1b69951a81e2 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -365,7 +365,7 @@ ENDPROC(native_usergs_sysret64) | |||
365 | /*CFI_REL_OFFSET ss,0*/ | 365 | /*CFI_REL_OFFSET ss,0*/ |
366 | pushq_cfi %rax /* rsp */ | 366 | pushq_cfi %rax /* rsp */ |
367 | CFI_REL_OFFSET rsp,0 | 367 | CFI_REL_OFFSET rsp,0 |
368 | pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */ | 368 | pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */ |
369 | /*CFI_REL_OFFSET rflags,0*/ | 369 | /*CFI_REL_OFFSET rflags,0*/ |
370 | pushq_cfi $__KERNEL_CS /* cs */ | 370 | pushq_cfi $__KERNEL_CS /* cs */ |
371 | /*CFI_REL_OFFSET cs,0*/ | 371 | /*CFI_REL_OFFSET cs,0*/ |
@@ -1138,7 +1138,7 @@ END(common_interrupt) | |||
1138 | /* | 1138 | /* |
1139 | * APIC interrupts. | 1139 | * APIC interrupts. |
1140 | */ | 1140 | */ |
1141 | .macro apicinterrupt num sym do_sym | 1141 | .macro apicinterrupt3 num sym do_sym |
1142 | ENTRY(\sym) | 1142 | ENTRY(\sym) |
1143 | INTR_FRAME | 1143 | INTR_FRAME |
1144 | ASM_CLAC | 1144 | ASM_CLAC |
@@ -1150,15 +1150,32 @@ ENTRY(\sym) | |||
1150 | END(\sym) | 1150 | END(\sym) |
1151 | .endm | 1151 | .endm |
1152 | 1152 | ||
1153 | #ifdef CONFIG_TRACING | ||
1154 | #define trace(sym) trace_##sym | ||
1155 | #define smp_trace(sym) smp_trace_##sym | ||
1156 | |||
1157 | .macro trace_apicinterrupt num sym | ||
1158 | apicinterrupt3 \num trace(\sym) smp_trace(\sym) | ||
1159 | .endm | ||
1160 | #else | ||
1161 | .macro trace_apicinterrupt num sym do_sym | ||
1162 | .endm | ||
1163 | #endif | ||
1164 | |||
1165 | .macro apicinterrupt num sym do_sym | ||
1166 | apicinterrupt3 \num \sym \do_sym | ||
1167 | trace_apicinterrupt \num \sym | ||
1168 | .endm | ||
1169 | |||
1153 | #ifdef CONFIG_SMP | 1170 | #ifdef CONFIG_SMP |
1154 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ | 1171 | apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \ |
1155 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt | 1172 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
1156 | apicinterrupt REBOOT_VECTOR \ | 1173 | apicinterrupt3 REBOOT_VECTOR \ |
1157 | reboot_interrupt smp_reboot_interrupt | 1174 | reboot_interrupt smp_reboot_interrupt |
1158 | #endif | 1175 | #endif |
1159 | 1176 | ||
1160 | #ifdef CONFIG_X86_UV | 1177 | #ifdef CONFIG_X86_UV |
1161 | apicinterrupt UV_BAU_MESSAGE \ | 1178 | apicinterrupt3 UV_BAU_MESSAGE \ |
1162 | uv_bau_message_intr1 uv_bau_message_interrupt | 1179 | uv_bau_message_intr1 uv_bau_message_interrupt |
1163 | #endif | 1180 | #endif |
1164 | apicinterrupt LOCAL_TIMER_VECTOR \ | 1181 | apicinterrupt LOCAL_TIMER_VECTOR \ |
@@ -1167,14 +1184,19 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ | |||
1167 | x86_platform_ipi smp_x86_platform_ipi | 1184 | x86_platform_ipi smp_x86_platform_ipi |
1168 | 1185 | ||
1169 | #ifdef CONFIG_HAVE_KVM | 1186 | #ifdef CONFIG_HAVE_KVM |
1170 | apicinterrupt POSTED_INTR_VECTOR \ | 1187 | apicinterrupt3 POSTED_INTR_VECTOR \ |
1171 | kvm_posted_intr_ipi smp_kvm_posted_intr_ipi | 1188 | kvm_posted_intr_ipi smp_kvm_posted_intr_ipi |
1172 | #endif | 1189 | #endif |
1173 | 1190 | ||
1191 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
1174 | apicinterrupt THRESHOLD_APIC_VECTOR \ | 1192 | apicinterrupt THRESHOLD_APIC_VECTOR \ |
1175 | threshold_interrupt smp_threshold_interrupt | 1193 | threshold_interrupt smp_threshold_interrupt |
1194 | #endif | ||
1195 | |||
1196 | #ifdef CONFIG_X86_THERMAL_VECTOR | ||
1176 | apicinterrupt THERMAL_APIC_VECTOR \ | 1197 | apicinterrupt THERMAL_APIC_VECTOR \ |
1177 | thermal_interrupt smp_thermal_interrupt | 1198 | thermal_interrupt smp_thermal_interrupt |
1199 | #endif | ||
1178 | 1200 | ||
1179 | #ifdef CONFIG_SMP | 1201 | #ifdef CONFIG_SMP |
1180 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ | 1202 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ |
@@ -1451,13 +1473,13 @@ ENTRY(xen_failsafe_callback) | |||
1451 | CFI_ENDPROC | 1473 | CFI_ENDPROC |
1452 | END(xen_failsafe_callback) | 1474 | END(xen_failsafe_callback) |
1453 | 1475 | ||
1454 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ | 1476 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
1455 | xen_hvm_callback_vector xen_evtchn_do_upcall | 1477 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1456 | 1478 | ||
1457 | #endif /* CONFIG_XEN */ | 1479 | #endif /* CONFIG_XEN */ |
1458 | 1480 | ||
1459 | #if IS_ENABLED(CONFIG_HYPERV) | 1481 | #if IS_ENABLED(CONFIG_HYPERV) |
1460 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ | 1482 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
1461 | hyperv_callback_vector hyperv_vector_handler | 1483 | hyperv_callback_vector hyperv_vector_handler |
1462 | #endif /* CONFIG_HYPERV */ | 1484 | #endif /* CONFIG_HYPERV */ |
1463 | 1485 | ||
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 73afd11799ca..5dd87a89f011 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0) | |||
292 | * If cpu hotplug is not supported then this code can go in init section | 292 | * If cpu hotplug is not supported then this code can go in init section |
293 | * which will be freed later | 293 | * which will be freed later |
294 | */ | 294 | */ |
295 | __CPUINIT | ||
296 | ENTRY(startup_32_smp) | 295 | ENTRY(startup_32_smp) |
297 | cld | 296 | cld |
298 | movl $(__BOOT_DS),%eax | 297 | movl $(__BOOT_DS),%eax |
@@ -444,7 +443,6 @@ is486: | |||
444 | orl %ecx,%eax | 443 | orl %ecx,%eax |
445 | movl %eax,%cr0 | 444 | movl %eax,%cr0 |
446 | 445 | ||
447 | call check_x87 | ||
448 | lgdt early_gdt_descr | 446 | lgdt early_gdt_descr |
449 | lidt idt_descr | 447 | lidt idt_descr |
450 | ljmp $(__KERNEL_CS),$1f | 448 | ljmp $(__KERNEL_CS),$1f |
@@ -467,26 +465,6 @@ is486: | |||
467 | pushl $0 # fake return address for unwinder | 465 | pushl $0 # fake return address for unwinder |
468 | jmp *(initial_code) | 466 | jmp *(initial_code) |
469 | 467 | ||
470 | /* | ||
471 | * We depend on ET to be correct. This checks for 287/387. | ||
472 | */ | ||
473 | check_x87: | ||
474 | movb $0,X86_HARD_MATH | ||
475 | clts | ||
476 | fninit | ||
477 | fstsw %ax | ||
478 | cmpb $0,%al | ||
479 | je 1f | ||
480 | movl %cr0,%eax /* no coprocessor: have to set bits */ | ||
481 | xorl $4,%eax /* set EM */ | ||
482 | movl %eax,%cr0 | ||
483 | ret | ||
484 | ALIGN | ||
485 | 1: movb $1,X86_HARD_MATH | ||
486 | .byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */ | ||
487 | ret | ||
488 | |||
489 | |||
490 | #include "verify_cpu.S" | 468 | #include "verify_cpu.S" |
491 | 469 | ||
492 | /* | 470 | /* |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 321d65ebaffe..e1aabdb314c8 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -512,15 +512,6 @@ ENTRY(phys_base) | |||
512 | 512 | ||
513 | #include "../../x86/xen/xen-head.S" | 513 | #include "../../x86/xen/xen-head.S" |
514 | 514 | ||
515 | .section .bss, "aw", @nobits | ||
516 | .align L1_CACHE_BYTES | ||
517 | ENTRY(idt_table) | ||
518 | .skip IDT_ENTRIES * 16 | ||
519 | |||
520 | .align L1_CACHE_BYTES | ||
521 | ENTRY(nmi_idt_table) | ||
522 | .skip IDT_ENTRIES * 16 | ||
523 | |||
524 | __PAGE_ALIGNED_BSS | 515 | __PAGE_ALIGNED_BSS |
525 | NEXT_PAGE(empty_zero_page) | 516 | NEXT_PAGE(empty_zero_page) |
526 | .skip PAGE_SIZE | 517 | .skip PAGE_SIZE |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 02f07634d265..f66ff162dce8 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -393,6 +393,9 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | |||
393 | unregister_hw_breakpoint(t->ptrace_bps[i]); | 393 | unregister_hw_breakpoint(t->ptrace_bps[i]); |
394 | t->ptrace_bps[i] = NULL; | 394 | t->ptrace_bps[i] = NULL; |
395 | } | 395 | } |
396 | |||
397 | t->debugreg6 = 0; | ||
398 | t->ptrace_dr7 = 0; | ||
396 | } | 399 | } |
397 | 400 | ||
398 | void hw_breakpoint_restore(void) | 401 | void hw_breakpoint_restore(void) |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index cb339097b9ea..5d576ab34403 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -108,15 +108,15 @@ EXPORT_SYMBOL(unlazy_fpu); | |||
108 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 108 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
109 | unsigned int xstate_size; | 109 | unsigned int xstate_size; |
110 | EXPORT_SYMBOL_GPL(xstate_size); | 110 | EXPORT_SYMBOL_GPL(xstate_size); |
111 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 111 | static struct i387_fxsave_struct fx_scratch; |
112 | 112 | ||
113 | static void __cpuinit mxcsr_feature_mask_init(void) | 113 | static void mxcsr_feature_mask_init(void) |
114 | { | 114 | { |
115 | unsigned long mask = 0; | 115 | unsigned long mask = 0; |
116 | 116 | ||
117 | if (cpu_has_fxsr) { | 117 | if (cpu_has_fxsr) { |
118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); | 118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); |
119 | asm volatile("fxsave %0" : : "m" (fx_scratch)); | 119 | asm volatile("fxsave %0" : "+m" (fx_scratch)); |
120 | mask = fx_scratch.mxcsr_mask; | 120 | mask = fx_scratch.mxcsr_mask; |
121 | if (mask == 0) | 121 | if (mask == 0) |
122 | mask = 0x0000ffbf; | 122 | mask = 0x0000ffbf; |
@@ -124,14 +124,14 @@ static void __cpuinit mxcsr_feature_mask_init(void) | |||
124 | mxcsr_feature_mask &= mask; | 124 | mxcsr_feature_mask &= mask; |
125 | } | 125 | } |
126 | 126 | ||
127 | static void __cpuinit init_thread_xstate(void) | 127 | static void init_thread_xstate(void) |
128 | { | 128 | { |
129 | /* | 129 | /* |
130 | * Note that xstate_size might be overwritten later during | 130 | * Note that xstate_size might be overwritten later during |
131 | * xsave_init(). | 131 | * xsave_init(). |
132 | */ | 132 | */ |
133 | 133 | ||
134 | if (!HAVE_HWFP) { | 134 | if (!cpu_has_fpu) { |
135 | /* | 135 | /* |
136 | * Disable xsave as we do not support it if i387 | 136 | * Disable xsave as we do not support it if i387 |
137 | * emulation is enabled. | 137 | * emulation is enabled. |
@@ -153,11 +153,19 @@ static void __cpuinit init_thread_xstate(void) | |||
153 | * into all processes. | 153 | * into all processes. |
154 | */ | 154 | */ |
155 | 155 | ||
156 | void __cpuinit fpu_init(void) | 156 | void fpu_init(void) |
157 | { | 157 | { |
158 | unsigned long cr0; | 158 | unsigned long cr0; |
159 | unsigned long cr4_mask = 0; | 159 | unsigned long cr4_mask = 0; |
160 | 160 | ||
161 | #ifndef CONFIG_MATH_EMULATION | ||
162 | if (!cpu_has_fpu) { | ||
163 | pr_emerg("No FPU found and no math emulation present\n"); | ||
164 | pr_emerg("Giving up\n"); | ||
165 | for (;;) | ||
166 | asm volatile("hlt"); | ||
167 | } | ||
168 | #endif | ||
161 | if (cpu_has_fxsr) | 169 | if (cpu_has_fxsr) |
162 | cr4_mask |= X86_CR4_OSFXSR; | 170 | cr4_mask |= X86_CR4_OSFXSR; |
163 | if (cpu_has_xmm) | 171 | if (cpu_has_xmm) |
@@ -167,7 +175,7 @@ void __cpuinit fpu_init(void) | |||
167 | 175 | ||
168 | cr0 = read_cr0(); | 176 | cr0 = read_cr0(); |
169 | cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ | 177 | cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */ |
170 | if (!HAVE_HWFP) | 178 | if (!cpu_has_fpu) |
171 | cr0 |= X86_CR0_EM; | 179 | cr0 |= X86_CR0_EM; |
172 | write_cr0(cr0); | 180 | write_cr0(cr0); |
173 | 181 | ||
@@ -185,7 +193,7 @@ void __cpuinit fpu_init(void) | |||
185 | 193 | ||
186 | void fpu_finit(struct fpu *fpu) | 194 | void fpu_finit(struct fpu *fpu) |
187 | { | 195 | { |
188 | if (!HAVE_HWFP) { | 196 | if (!cpu_has_fpu) { |
189 | finit_soft_fpu(&fpu->state->soft); | 197 | finit_soft_fpu(&fpu->state->soft); |
190 | return; | 198 | return; |
191 | } | 199 | } |
@@ -214,7 +222,7 @@ int init_fpu(struct task_struct *tsk) | |||
214 | int ret; | 222 | int ret; |
215 | 223 | ||
216 | if (tsk_used_math(tsk)) { | 224 | if (tsk_used_math(tsk)) { |
217 | if (HAVE_HWFP && tsk == current) | 225 | if (cpu_has_fpu && tsk == current) |
218 | unlazy_fpu(tsk); | 226 | unlazy_fpu(tsk); |
219 | tsk->thread.fpu.last_cpu = ~0; | 227 | tsk->thread.fpu.last_cpu = ~0; |
220 | return 0; | 228 | return 0; |
@@ -511,14 +519,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
511 | if (ret) | 519 | if (ret) |
512 | return ret; | 520 | return ret; |
513 | 521 | ||
514 | if (!HAVE_HWFP) | 522 | if (!static_cpu_has(X86_FEATURE_FPU)) |
515 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); | 523 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); |
516 | 524 | ||
517 | if (!cpu_has_fxsr) { | 525 | if (!cpu_has_fxsr) |
518 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 526 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
519 | &target->thread.fpu.state->fsave, 0, | 527 | &target->thread.fpu.state->fsave, 0, |
520 | -1); | 528 | -1); |
521 | } | ||
522 | 529 | ||
523 | sanitize_i387_state(target); | 530 | sanitize_i387_state(target); |
524 | 531 | ||
@@ -545,13 +552,13 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
545 | 552 | ||
546 | sanitize_i387_state(target); | 553 | sanitize_i387_state(target); |
547 | 554 | ||
548 | if (!HAVE_HWFP) | 555 | if (!static_cpu_has(X86_FEATURE_FPU)) |
549 | return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); | 556 | return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); |
550 | 557 | ||
551 | if (!cpu_has_fxsr) { | 558 | if (!cpu_has_fxsr) |
552 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 559 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
553 | &target->thread.fpu.state->fsave, 0, -1); | 560 | &target->thread.fpu.state->fsave, 0, |
554 | } | 561 | -1); |
555 | 562 | ||
556 | if (pos > 0 || count < sizeof(env)) | 563 | if (pos > 0 || count < sizeof(env)) |
557 | convert_from_fxsr(&env, target); | 564 | convert_from_fxsr(&env, target); |
@@ -592,3 +599,33 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu) | |||
592 | EXPORT_SYMBOL(dump_fpu); | 599 | EXPORT_SYMBOL(dump_fpu); |
593 | 600 | ||
594 | #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */ | 601 | #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */ |
602 | |||
603 | static int __init no_387(char *s) | ||
604 | { | ||
605 | setup_clear_cpu_cap(X86_FEATURE_FPU); | ||
606 | return 1; | ||
607 | } | ||
608 | |||
609 | __setup("no387", no_387); | ||
610 | |||
611 | void fpu_detect(struct cpuinfo_x86 *c) | ||
612 | { | ||
613 | unsigned long cr0; | ||
614 | u16 fsw, fcw; | ||
615 | |||
616 | fsw = fcw = 0xffff; | ||
617 | |||
618 | cr0 = read_cr0(); | ||
619 | cr0 &= ~(X86_CR0_TS | X86_CR0_EM); | ||
620 | write_cr0(cr0); | ||
621 | |||
622 | asm volatile("fninit ; fnstsw %0 ; fnstcw %1" | ||
623 | : "+m" (fsw), "+m" (fcw)); | ||
624 | |||
625 | if (fsw == 0 && (fcw & 0x103f) == 0x003f) | ||
626 | set_cpu_cap(c, X86_FEATURE_FPU); | ||
627 | else | ||
628 | clear_cpu_cap(c, X86_FEATURE_FPU); | ||
629 | |||
630 | /* The final cr0 value is set in fpu_init() */ | ||
631 | } | ||
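fpu_detect() above is the C counterpart of the check_x87 assembly deleted from head_32.S: write 0xffff sentinels, run fninit/fnstsw/fnstcw with TS and EM cleared, and test what the hardware wrote back. The FNINIT reset values are architectural (status word 0x0000, control word 0x037f), so the masked comparison works out as in this userspace sketch of just the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* values a real x87 leaves after FNINIT */
        unsigned short fsw = 0x0000, fcw = 0x037f;
        printf("fpu present: %d\n", fsw == 0 && (fcw & 0x103f) == 0x003f);

        /* no FPU: nothing overwrites the sentinels */
        fsw = fcw = 0xffff;
        printf("fpu present: %d\n", fsw == 0 && (fcw & 0x103f) == 0x003f);
        return 0;
    }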
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index ac0631d8996f..3a8185c042a2 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -18,6 +18,9 @@ | |||
18 | #include <asm/mce.h> | 18 | #include <asm/mce.h> |
19 | #include <asm/hw_irq.h> | 19 | #include <asm/hw_irq.h> |
20 | 20 | ||
21 | #define CREATE_TRACE_POINTS | ||
22 | #include <asm/trace/irq_vectors.h> | ||
23 | |||
21 | atomic_t irq_err_count; | 24 | atomic_t irq_err_count; |
22 | 25 | ||
23 | /* Function pointer for generic interrupt vector handling */ | 26 | /* Function pointer for generic interrupt vector handling */ |
@@ -204,23 +207,21 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
204 | /* | 207 | /* |
205 | * Handler for X86_PLATFORM_IPI_VECTOR. | 208 | * Handler for X86_PLATFORM_IPI_VECTOR. |
206 | */ | 209 | */ |
207 | void smp_x86_platform_ipi(struct pt_regs *regs) | 210 | void __smp_x86_platform_ipi(void) |
208 | { | 211 | { |
209 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
210 | |||
211 | ack_APIC_irq(); | ||
212 | |||
213 | irq_enter(); | ||
214 | |||
215 | exit_idle(); | ||
216 | |||
217 | inc_irq_stat(x86_platform_ipis); | 212 | inc_irq_stat(x86_platform_ipis); |
218 | 213 | ||
219 | if (x86_platform_ipi_callback) | 214 | if (x86_platform_ipi_callback) |
220 | x86_platform_ipi_callback(); | 215 | x86_platform_ipi_callback(); |
216 | } | ||
221 | 217 | ||
222 | irq_exit(); | 218 | void smp_x86_platform_ipi(struct pt_regs *regs) |
219 | { | ||
220 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
223 | 221 | ||
222 | entering_ack_irq(); | ||
223 | __smp_x86_platform_ipi(); | ||
224 | exiting_irq(); | ||
224 | set_irq_regs(old_regs); | 225 | set_irq_regs(old_regs); |
225 | } | 226 | } |
226 | 227 | ||
@@ -246,6 +247,18 @@ void smp_kvm_posted_intr_ipi(struct pt_regs *regs) | |||
246 | } | 247 | } |
247 | #endif | 248 | #endif |
248 | 249 | ||
250 | void smp_trace_x86_platform_ipi(struct pt_regs *regs) | ||
251 | { | ||
252 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
253 | |||
254 | entering_ack_irq(); | ||
255 | trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR); | ||
256 | __smp_x86_platform_ipi(); | ||
257 | trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR); | ||
258 | exiting_irq(); | ||
259 | set_irq_regs(old_regs); | ||
260 | } | ||
261 | |||
249 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 262 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
250 | 263 | ||
251 | #ifdef CONFIG_HOTPLUG_CPU | 264 | #ifdef CONFIG_HOTPLUG_CPU |
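The split above is the pattern used throughout this series: the handler body moves into a bare __smp_* helper, and two thin wrappers — one plain, one smp_trace_* with entry/exit tracepoints — share it, so the untraced hot path gains no tracing overhead. A userspace sketch of the shape (names illustrative):

    #include <stdio.h>

    static void handler_body(void) { puts("handle vector"); }  /* __smp_* analogue */
    static void enter_irq(void)    { puts("ack APIC, irq_enter"); }
    static void exit_irq(void)     { puts("irq_exit"); }

    void handler(void)             /* smp_x86_platform_ipi() analogue */
    {
        enter_irq();
        handler_body();
        exit_irq();
    }

    void trace_handler(void)       /* smp_trace_x86_platform_ipi() analogue */
    {
        enter_irq();
        puts("trace entry");       /* tracepoint fires inside irq context */
        handler_body();
        puts("trace exit");
        exit_irq();
    }

    int main(void) { handler(); trace_handler(); return 0; }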
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 344faf8d0d62..4186755f1d7c 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
119 | /* | 119 | /* |
120 | * allocate per-cpu stacks for hardirq and for softirq processing | 120 | * allocate per-cpu stacks for hardirq and for softirq processing |
121 | */ | 121 | */ |
122 | void __cpuinit irq_ctx_init(int cpu) | 122 | void irq_ctx_init(int cpu) |
123 | { | 123 | { |
124 | union irq_ctx *irqctx; | 124 | union irq_ctx *irqctx; |
125 | 125 | ||
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c index ca8f703a1e70..636a55e4a13c 100644 --- a/arch/x86/kernel/irq_work.c +++ b/arch/x86/kernel/irq_work.c | |||
@@ -8,14 +8,34 @@ | |||
8 | #include <linux/irq_work.h> | 8 | #include <linux/irq_work.h> |
9 | #include <linux/hardirq.h> | 9 | #include <linux/hardirq.h> |
10 | #include <asm/apic.h> | 10 | #include <asm/apic.h> |
11 | #include <asm/trace/irq_vectors.h> | ||
11 | 12 | ||
12 | void smp_irq_work_interrupt(struct pt_regs *regs) | 13 | static inline void irq_work_entering_irq(void) |
13 | { | 14 | { |
14 | irq_enter(); | 15 | irq_enter(); |
15 | ack_APIC_irq(); | 16 | ack_APIC_irq(); |
17 | } | ||
18 | |||
19 | static inline void __smp_irq_work_interrupt(void) | ||
20 | { | ||
16 | inc_irq_stat(apic_irq_work_irqs); | 21 | inc_irq_stat(apic_irq_work_irqs); |
17 | irq_work_run(); | 22 | irq_work_run(); |
18 | irq_exit(); | 23 | } |
24 | |||
25 | void smp_irq_work_interrupt(struct pt_regs *regs) | ||
26 | { | ||
27 | irq_work_entering_irq(); | ||
28 | __smp_irq_work_interrupt(); | ||
29 | exiting_irq(); | ||
30 | } | ||
31 | |||
32 | void smp_trace_irq_work_interrupt(struct pt_regs *regs) | ||
33 | { | ||
34 | irq_work_entering_irq(); | ||
35 | trace_irq_work_entry(IRQ_WORK_VECTOR); | ||
36 | __smp_irq_work_interrupt(); | ||
37 | trace_irq_work_exit(IRQ_WORK_VECTOR); | ||
38 | exiting_irq(); | ||
19 | } | 39 | } |
20 | 40 | ||
21 | void arch_irq_work_raise(void) | 41 | void arch_irq_work_raise(void) |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index cd6d9a5a42f6..a96d32cc55b8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
320 | apic_write(APIC_EOI, APIC_EOI_ACK); | 320 | apic_write(APIC_EOI, APIC_EOI_ACK); |
321 | } | 321 | } |
322 | 322 | ||
323 | void __cpuinit kvm_guest_cpu_init(void) | 323 | void kvm_guest_cpu_init(void) |
324 | { | 324 | { |
325 | if (!kvm_para_available()) | 325 | if (!kvm_para_available()) |
326 | return; | 326 | return; |
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void) | |||
421 | native_smp_prepare_boot_cpu(); | 421 | native_smp_prepare_boot_cpu(); |
422 | } | 422 | } |
423 | 423 | ||
424 | static void __cpuinit kvm_guest_cpu_online(void *dummy) | 424 | static void kvm_guest_cpu_online(void *dummy) |
425 | { | 425 | { |
426 | kvm_guest_cpu_init(); | 426 | kvm_guest_cpu_init(); |
427 | } | 427 | } |
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy) | |||
435 | apf_task_wake_all(); | 435 | apf_task_wake_all(); |
436 | } | 436 | } |
437 | 437 | ||
438 | static int __cpuinit kvm_cpu_notify(struct notifier_block *self, | 438 | static int kvm_cpu_notify(struct notifier_block *self, unsigned long action, |
439 | unsigned long action, void *hcpu) | 439 | void *hcpu) |
440 | { | 440 | { |
441 | int cpu = (unsigned long)hcpu; | 441 | int cpu = (unsigned long)hcpu; |
442 | switch (action) { | 442 | switch (action) { |
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self, | |||
455 | return NOTIFY_OK; | 455 | return NOTIFY_OK; |
456 | } | 456 | } |
457 | 457 | ||
458 | static struct notifier_block __cpuinitdata kvm_cpu_notifier = { | 458 | static struct notifier_block kvm_cpu_notifier = { |
459 | .notifier_call = kvm_cpu_notify, | 459 | .notifier_call = kvm_cpu_notify, |
460 | }; | 460 | }; |
461 | #endif | 461 | #endif |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 3dd37ebd591b..1570e0741344 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -48,10 +48,9 @@ static struct pvclock_wall_clock wall_clock; | |||
48 | * have elapsed since the hypervisor wrote the data. So we try to account for | 48 | * have elapsed since the hypervisor wrote the data. So we try to account for |
49 | * that with system time | 49 | * that with system time |
50 | */ | 50 | */ |
51 | static unsigned long kvm_get_wallclock(void) | 51 | static void kvm_get_wallclock(struct timespec *now) |
52 | { | 52 | { |
53 | struct pvclock_vcpu_time_info *vcpu_time; | 53 | struct pvclock_vcpu_time_info *vcpu_time; |
54 | struct timespec ts; | ||
55 | int low, high; | 54 | int low, high; |
56 | int cpu; | 55 | int cpu; |
57 | 56 | ||
@@ -64,14 +63,12 @@ static unsigned long kvm_get_wallclock(void) | |||
64 | cpu = smp_processor_id(); | 63 | cpu = smp_processor_id(); |
65 | 64 | ||
66 | vcpu_time = &hv_clock[cpu].pvti; | 65 | vcpu_time = &hv_clock[cpu].pvti; |
67 | pvclock_read_wallclock(&wall_clock, vcpu_time, &ts); | 66 | pvclock_read_wallclock(&wall_clock, vcpu_time, now); |
68 | 67 | ||
69 | preempt_enable(); | 68 | preempt_enable(); |
70 | |||
71 | return ts.tv_sec; | ||
72 | } | 69 | } |
73 | 70 | ||
74 | static int kvm_set_wallclock(unsigned long now) | 71 | static int kvm_set_wallclock(const struct timespec *now) |
75 | { | 72 | { |
76 | return -1; | 73 | return -1; |
77 | } | 74 | } |
@@ -185,7 +182,7 @@ static void kvm_restore_sched_clock_state(void) | |||
185 | } | 182 | } |
186 | 183 | ||
187 | #ifdef CONFIG_X86_LOCAL_APIC | 184 | #ifdef CONFIG_X86_LOCAL_APIC |
188 | static void __cpuinit kvm_setup_secondary_clock(void) | 185 | static void kvm_setup_secondary_clock(void) |
189 | { | 186 | { |
190 | /* | 187 | /* |
191 | * Now that the first cpu already had this clocksource initialized, | 188 | * Now that the first cpu already had this clocksource initialized, |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index efdec7cd8e01..7a0adb7ee433 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -31,48 +31,12 @@ | |||
31 | #include <asm/microcode.h> | 31 | #include <asm/microcode.h> |
32 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
33 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
34 | #include <asm/microcode_amd.h> | ||
34 | 35 | ||
35 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); | 36 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); |
36 | MODULE_AUTHOR("Peter Oruba"); | 37 | MODULE_AUTHOR("Peter Oruba"); |
37 | MODULE_LICENSE("GPL v2"); | 38 | MODULE_LICENSE("GPL v2"); |
38 | 39 | ||
39 | #define UCODE_MAGIC 0x00414d44 | ||
40 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 | ||
41 | #define UCODE_UCODE_TYPE 0x00000001 | ||
42 | |||
43 | struct equiv_cpu_entry { | ||
44 | u32 installed_cpu; | ||
45 | u32 fixed_errata_mask; | ||
46 | u32 fixed_errata_compare; | ||
47 | u16 equiv_cpu; | ||
48 | u16 res; | ||
49 | } __attribute__((packed)); | ||
50 | |||
51 | struct microcode_header_amd { | ||
52 | u32 data_code; | ||
53 | u32 patch_id; | ||
54 | u16 mc_patch_data_id; | ||
55 | u8 mc_patch_data_len; | ||
56 | u8 init_flag; | ||
57 | u32 mc_patch_data_checksum; | ||
58 | u32 nb_dev_id; | ||
59 | u32 sb_dev_id; | ||
60 | u16 processor_rev_id; | ||
61 | u8 nb_rev_id; | ||
62 | u8 sb_rev_id; | ||
63 | u8 bios_api_rev; | ||
64 | u8 reserved1[3]; | ||
65 | u32 match_reg[8]; | ||
66 | } __attribute__((packed)); | ||
67 | |||
68 | struct microcode_amd { | ||
69 | struct microcode_header_amd hdr; | ||
70 | unsigned int mpb[0]; | ||
71 | }; | ||
72 | |||
73 | #define SECTION_HDR_SIZE 8 | ||
74 | #define CONTAINER_HDR_SZ 12 | ||
75 | |||
76 | static struct equiv_cpu_entry *equiv_cpu_table; | 40 | static struct equiv_cpu_entry *equiv_cpu_table; |
77 | 41 | ||
78 | struct ucode_patch { | 42 | struct ucode_patch { |
@@ -84,21 +48,10 @@ struct ucode_patch { | |||
84 | 48 | ||
85 | static LIST_HEAD(pcache); | 49 | static LIST_HEAD(pcache); |
86 | 50 | ||
87 | static u16 find_equiv_id(unsigned int cpu) | 51 | static u16 __find_equiv_id(unsigned int cpu) |
88 | { | 52 | { |
89 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 53 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
90 | int i = 0; | 54 | return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig); |
91 | |||
92 | if (!equiv_cpu_table) | ||
93 | return 0; | ||
94 | |||
95 | while (equiv_cpu_table[i].installed_cpu != 0) { | ||
96 | if (uci->cpu_sig.sig == equiv_cpu_table[i].installed_cpu) | ||
97 | return equiv_cpu_table[i].equiv_cpu; | ||
98 | |||
99 | i++; | ||
100 | } | ||
101 | return 0; | ||
102 | } | 55 | } |
103 | 56 | ||
104 | static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) | 57 | static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) |
@@ -163,7 +116,7 @@ static struct ucode_patch *find_patch(unsigned int cpu) | |||
163 | { | 116 | { |
164 | u16 equiv_id; | 117 | u16 equiv_id; |
165 | 118 | ||
166 | equiv_id = find_equiv_id(cpu); | 119 | equiv_id = __find_equiv_id(cpu); |
167 | if (!equiv_id) | 120 | if (!equiv_id) |
168 | return NULL; | 121 | return NULL; |
169 | 122 | ||
@@ -173,9 +126,20 @@ static struct ucode_patch *find_patch(unsigned int cpu) | |||
173 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | 126 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) |
174 | { | 127 | { |
175 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 128 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
129 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
130 | struct ucode_patch *p; | ||
176 | 131 | ||
177 | csig->sig = cpuid_eax(0x00000001); | 132 | csig->sig = cpuid_eax(0x00000001); |
178 | csig->rev = c->microcode; | 133 | csig->rev = c->microcode; |
134 | |||
135 | /* | ||
136 | * a patch could have been loaded early, set uci->mc so that | ||
137 | * mc_bp_resume() can call apply_microcode() | ||
138 | */ | ||
139 | p = find_patch(cpu); | ||
140 | if (p && (p->patch_id == csig->rev)) | ||
141 | uci->mc = p->data; | ||
142 | |||
179 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); | 143 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); |
180 | 144 | ||
181 | return 0; | 145 | return 0; |
@@ -215,7 +179,21 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
215 | return patch_size; | 179 | return patch_size; |
216 | } | 180 | } |
217 | 181 | ||
218 | static int apply_microcode_amd(int cpu) | 182 | int __apply_microcode_amd(struct microcode_amd *mc_amd) |
183 | { | ||
184 | u32 rev, dummy; | ||
185 | |||
186 | wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); | ||
187 | |||
188 | /* verify patch application was successful */ | ||
189 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); | ||
190 | if (rev != mc_amd->hdr.patch_id) | ||
191 | return -1; | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | int apply_microcode_amd(int cpu) | ||
219 | { | 197 | { |
220 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 198 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
221 | struct microcode_amd *mc_amd; | 199 | struct microcode_amd *mc_amd; |
@@ -242,19 +220,16 @@ static int apply_microcode_amd(int cpu) | |||
242 | return 0; | 220 | return 0; |
243 | } | 221 | } |
244 | 222 | ||
245 | wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); | 223 | if (__apply_microcode_amd(mc_amd)) { |
246 | |||
247 | /* verify patch application was successful */ | ||
248 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); | ||
249 | if (rev != mc_amd->hdr.patch_id) { | ||
250 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", | 224 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
251 | cpu, mc_amd->hdr.patch_id); | 225 | cpu, mc_amd->hdr.patch_id); |
252 | return -1; | 226 | return -1; |
253 | } | 227 | } |
228 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | ||
229 | mc_amd->hdr.patch_id); | ||
254 | 230 | ||
255 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); | 231 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; |
256 | uci->cpu_sig.rev = rev; | 232 | c->microcode = mc_amd->hdr.patch_id; |
257 | c->microcode = rev; | ||
258 | 233 | ||
259 | return 0; | 234 | return 0; |
260 | } | 235 | } |
@@ -364,7 +339,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
364 | return crnt_size; | 339 | return crnt_size; |
365 | } | 340 | } |
366 | 341 | ||
367 | static enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | 342 | static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) |
368 | { | 343 | { |
369 | enum ucode_state ret = UCODE_ERROR; | 344 | enum ucode_state ret = UCODE_ERROR; |
370 | unsigned int leftover; | 345 | unsigned int leftover; |
@@ -398,6 +373,32 @@ static enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | |||
398 | return UCODE_OK; | 373 | return UCODE_OK; |
399 | } | 374 | } |
400 | 375 | ||
376 | enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | ||
377 | { | ||
378 | enum ucode_state ret; | ||
379 | |||
380 | /* free old equiv table */ | ||
381 | free_equiv_cpu_table(); | ||
382 | |||
383 | ret = __load_microcode_amd(cpu, data, size); | ||
384 | |||
385 | if (ret != UCODE_OK) | ||
386 | cleanup(); | ||
387 | |||
388 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) | ||
389 | /* save BSP's matching patch for early load */ | ||
390 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { | ||
391 | struct ucode_patch *p = find_patch(cpu); | ||
392 | if (p) { | ||
393 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); | ||
394 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), | ||
395 | MPB_MAX_SIZE)); | ||
396 | } | ||
397 | } | ||
398 | #endif | ||
399 | return ret; | ||
400 | } | ||
401 | |||
401 | /* | 402 | /* |
402 | * AMD microcode firmware naming convention, up to family 15h they are in | 403 | * AMD microcode firmware naming convention, up to family 15h they are in |
403 | * the legacy file: | 404 | * the legacy file: |
@@ -440,12 +441,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
440 | goto fw_release; | 441 | goto fw_release; |
441 | } | 442 | } |
442 | 443 | ||
443 | /* free old equiv table */ | ||
444 | free_equiv_cpu_table(); | ||
445 | |||
446 | ret = load_microcode_amd(cpu, fw->data, fw->size); | 444 | ret = load_microcode_amd(cpu, fw->data, fw->size); |
447 | if (ret != UCODE_OK) | ||
448 | cleanup(); | ||
449 | 445 | ||
450 | fw_release: | 446 | fw_release: |
451 | release_firmware(fw); | 447 | release_firmware(fw); |
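The equivalence-table walk deleted above is now shared (via the new asm/microcode_amd.h include) as find_equiv_id(table, sig), so the early loader below can use it on the raw initrd image too. A userspace sketch reconstructed from the deleted lines; the table contents in main() are made-up illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    struct equiv_cpu_entry {
        uint32_t installed_cpu;
        uint32_t fixed_errata_mask;
        uint32_t fixed_errata_compare;
        uint16_t equiv_cpu;
        uint16_t res;
    } __attribute__((packed));

    static uint16_t find_equiv_id(const struct equiv_cpu_entry *table, uint32_t sig)
    {
        if (!table)
            return 0;
        for (int i = 0; table[i].installed_cpu; i++)
            if (table[i].installed_cpu == sig)
                return table[i].equiv_cpu;
        return 0;    /* no entry: no patch applies to this CPU */
    }

    int main(void)
    {
        struct equiv_cpu_entry table[] = {
            { 0x00100f22, 0, 0, 0x1022, 0 },   /* illustrative signature */
            { 0 },                             /* zero terminator */
        };
        printf("equiv=%#x\n", find_equiv_id(table, 0x00100f22));
        return 0;
    }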
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c new file mode 100644 index 000000000000..1d14ffee5749 --- /dev/null +++ b/arch/x86/kernel/microcode_amd_early.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Jacob Shin <jacob.shin@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/earlycpio.h> | ||
12 | #include <linux/initrd.h> | ||
13 | |||
14 | #include <asm/cpu.h> | ||
15 | #include <asm/setup.h> | ||
16 | #include <asm/microcode_amd.h> | ||
17 | |||
18 | static bool ucode_loaded; | ||
19 | static u32 ucode_new_rev; | ||
20 | static unsigned long ucode_offset; | ||
21 | static size_t ucode_size; | ||
22 | |||
23 | /* | ||
24 | * Microcode patch container file is prepended to the initrd in cpio format. | ||
25 | * See Documentation/x86/early-microcode.txt | ||
26 | */ | ||
27 | static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin"; | ||
28 | |||
29 | static struct cpio_data __init find_ucode_in_initrd(void) | ||
30 | { | ||
31 | long offset = 0; | ||
32 | char *path; | ||
33 | void *start; | ||
34 | size_t size; | ||
35 | unsigned long *uoffset; | ||
36 | size_t *usize; | ||
37 | struct cpio_data cd; | ||
38 | |||
39 | #ifdef CONFIG_X86_32 | ||
40 | struct boot_params *p; | ||
41 | |||
42 | /* | ||
43 | * On 32-bit, early load occurs before paging is turned on so we need | ||
44 | * to use physical addresses. | ||
45 | */ | ||
46 | p = (struct boot_params *)__pa_nodebug(&boot_params); | ||
47 | path = (char *)__pa_nodebug(ucode_path); | ||
48 | start = (void *)p->hdr.ramdisk_image; | ||
49 | size = p->hdr.ramdisk_size; | ||
50 | uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); | ||
51 | usize = (size_t *)__pa_nodebug(&ucode_size); | ||
52 | #else | ||
53 | path = ucode_path; | ||
54 | start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); | ||
55 | size = boot_params.hdr.ramdisk_size; | ||
56 | uoffset = &ucode_offset; | ||
57 | usize = &ucode_size; | ||
58 | #endif | ||
59 | |||
60 | cd = find_cpio_data(path, start, size, &offset); | ||
61 | if (!cd.data) | ||
62 | return cd; | ||
63 | |||
64 | if (*(u32 *)cd.data != UCODE_MAGIC) { | ||
65 | cd.data = NULL; | ||
66 | cd.size = 0; | ||
67 | return cd; | ||
68 | } | ||
69 | |||
70 | *uoffset = (u8 *)cd.data - (u8 *)start; | ||
71 | *usize = cd.size; | ||
72 | |||
73 | return cd; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Early load occurs before we can vmalloc(). So we look for the microcode | ||
78 | * patch container file in initrd, traverse equivalent cpu table, look for a | ||
79 | * matching microcode patch, and update, all in initrd memory in place. | ||
80 | * When vmalloc() is available for use later -- on 64-bit during first AP load, | ||
81 | * and on 32-bit during save_microcode_in_initrd_amd() -- we can call | ||
82 | * load_microcode_amd() to save equivalent cpu table and microcode patches in | ||
83 | * kernel heap memory. | ||
84 | */ | ||
85 | static void apply_ucode_in_initrd(void *ucode, size_t size) | ||
86 | { | ||
87 | struct equiv_cpu_entry *eq; | ||
88 | u32 *header; | ||
89 | u8 *data; | ||
90 | u16 eq_id = 0; | ||
91 | int offset, left; | ||
92 | u32 rev, eax; | ||
93 | u32 *new_rev; | ||
94 | unsigned long *uoffset; | ||
95 | size_t *usize; | ||
96 | |||
97 | #ifdef CONFIG_X86_32 | ||
98 | new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); | ||
99 | uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); | ||
100 | usize = (size_t *)__pa_nodebug(&ucode_size); | ||
101 | #else | ||
102 | new_rev = &ucode_new_rev; | ||
103 | uoffset = &ucode_offset; | ||
104 | usize = &ucode_size; | ||
105 | #endif | ||
106 | |||
107 | data = ucode; | ||
108 | left = size; | ||
109 | header = (u32 *)data; | ||
110 | |||
111 | /* find equiv cpu table */ | ||
112 | |||
113 | if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ | ||
114 | header[2] == 0) /* size */ | ||
115 | return; | ||
116 | |||
117 | eax = cpuid_eax(0x00000001); | ||
118 | |||
119 | while (left > 0) { | ||
120 | eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); | ||
121 | |||
122 | offset = header[2] + CONTAINER_HDR_SZ; | ||
123 | data += offset; | ||
124 | left -= offset; | ||
125 | |||
126 | eq_id = find_equiv_id(eq, eax); | ||
127 | if (eq_id) | ||
128 | break; | ||
129 | |||
130 | /* | ||
131 | * Support multiple container files appended together. If this | ||
132 | * one does not have a matching equivalent cpu entry, we fast- | ||
133 | * forward to the next container file. | ||
134 | */ | ||
135 | while (left > 0) { | ||
136 | header = (u32 *)data; | ||
137 | if (header[0] == UCODE_MAGIC && | ||
138 | header[1] == UCODE_EQUIV_CPU_TABLE_TYPE) | ||
139 | break; | ||
140 | |||
141 | offset = header[1] + SECTION_HDR_SIZE; | ||
142 | data += offset; | ||
143 | left -= offset; | ||
144 | } | ||
145 | |||
146 | /* mark where the next microcode container file starts */ | ||
147 | offset = data - (u8 *)ucode; | ||
148 | *uoffset += offset; | ||
149 | *usize -= offset; | ||
150 | ucode = data; | ||
151 | } | ||
152 | |||
153 | if (!eq_id) { | ||
154 | *usize = 0; | ||
155 | return; | ||
156 | } | ||
157 | |||
158 | /* find ucode and update if needed */ | ||
159 | |||
160 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | ||
161 | |||
162 | while (left > 0) { | ||
163 | struct microcode_amd *mc; | ||
164 | |||
165 | header = (u32 *)data; | ||
166 | if (header[0] != UCODE_UCODE_TYPE || /* type */ | ||
167 | header[1] == 0) /* size */ | ||
168 | break; | ||
169 | |||
170 | mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); | ||
171 | if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) | ||
172 | if (__apply_microcode_amd(mc) == 0) { | ||
173 | rev = mc->hdr.patch_id; | ||
174 | *new_rev = rev; | ||
175 | } | ||
176 | |||
177 | offset = header[1] + SECTION_HDR_SIZE; | ||
178 | data += offset; | ||
179 | left -= offset; | ||
180 | } | ||
181 | |||
182 | /* mark where this microcode container file ends */ | ||
183 | offset = *usize - (data - (u8 *)ucode); | ||
184 | *usize -= offset; | ||
185 | |||
186 | if (!(*new_rev)) | ||
187 | *usize = 0; | ||
188 | } | ||
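apply_ucode_in_initrd() above does all of its parsing with raw offset arithmetic over u32 headers. A userspace sketch of the same walk, assuming the layout constants from microcode_amd.h (CONTAINER_HDR_SZ = 12 bytes for magic/type/size, SECTION_HDR_SIZE = 8 bytes for type/size, equivalence-table type 0, patch type 1); the fake container built in main() is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000u
    #define UCODE_UCODE_TYPE           0x00000001u
    #define CONTAINER_HDR_SZ           12  /* magic + type + equiv-table size */
    #define SECTION_HDR_SIZE           8   /* section type + section size */

    /* Walk one container: skip the equivalence table, then visit each patch
     * section. Assumes the caller already verified UCODE_MAGIC, as the
     * kernel path does before reaching this point. */
    static void walk_container(const uint8_t *data, long left)
    {
        const uint32_t *hdr = (const uint32_t *)data;

        if (hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE || hdr[2] == 0)
            return;  /* no equivalence table: bail out */

        data += CONTAINER_HDR_SZ + hdr[2];
        left -= CONTAINER_HDR_SZ + hdr[2];

        while (left > 0) {
            hdr = (const uint32_t *)data;
            if (hdr[0] != UCODE_UCODE_TYPE || hdr[1] == 0)
                break;  /* not a patch section */
            printf("patch section, %u byte payload\n", hdr[1]);
            data += SECTION_HDR_SIZE + hdr[1];
            left -= SECTION_HDR_SIZE + hdr[1];
        }
    }

    int main(void)
    {
        /* Fake container: header, 16-byte equivalence table, one patch
         * section with a 4-byte payload. Values are illustrative only. */
        uint32_t buf[] = {
            0x00414d44u, UCODE_EQUIV_CPU_TABLE_TYPE, 16,
            0, 0, 0, 0,
            UCODE_UCODE_TYPE, 4, 0,
        };

        walk_container((const uint8_t *)buf, sizeof(buf));
        return 0;
    }

The kernel version additionally matches each patch against the equivalence-table ID and the current patch level before applying it; this sketch shows only the offset bookkeeping.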
189 | |||
190 | void __init load_ucode_amd_bsp(void) | ||
191 | { | ||
192 | struct cpio_data cd = find_ucode_in_initrd(); | ||
193 | if (!cd.data) | ||
194 | return; | ||
195 | |||
196 | apply_ucode_in_initrd(cd.data, cd.size); | ||
197 | } | ||
198 | |||
199 | #ifdef CONFIG_X86_32 | ||
200 | u8 amd_bsp_mpb[MPB_MAX_SIZE]; | ||
201 | |||
202 | /* | ||
203 | * On 32-bit, since an AP's early load occurs before paging is turned on, we | ||
204 | * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during | ||
205 | * cold boot each AP calls apply_ucode_in_initrd() just like the BSP. During | ||
206 | * save_microcode_in_initrd_amd(), the BSP's patch is copied to amd_bsp_mpb, | ||
207 | * which is used upon resume from suspend. | ||
208 | */ | ||
209 | void load_ucode_amd_ap(void) | ||
210 | { | ||
211 | struct microcode_amd *mc; | ||
212 | unsigned long *initrd; | ||
213 | unsigned long *uoffset; | ||
214 | size_t *usize; | ||
215 | void *ucode; | ||
216 | |||
217 | mc = (struct microcode_amd *)__pa(amd_bsp_mpb); | ||
218 | if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { | ||
219 | __apply_microcode_amd(mc); | ||
220 | return; | ||
221 | } | ||
222 | |||
223 | initrd = (unsigned long *)__pa(&initrd_start); | ||
224 | uoffset = (unsigned long *)__pa(&ucode_offset); | ||
225 | usize = (size_t *)__pa(&ucode_size); | ||
226 | |||
227 | if (!*usize || !*initrd) | ||
228 | return; | ||
229 | |||
230 | ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset); | ||
231 | apply_ucode_in_initrd(ucode, *usize); | ||
232 | } | ||
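Every global the 32-bit paths above touch (amd_bsp_mpb, ucode_offset, ucode_size, initrd_start) is reached through __pa() because the AP still runs with paging off, so link-time virtual addresses must be translated by hand. A minimal model of that fixup; PAGE_OFFSET_MODEL is a stand-in for the real, config-dependent mapping base (0xC0000000 is merely a common 32-bit default):

    #include <stdio.h>

    #define PAGE_OFFSET_MODEL 0xC0000000ul  /* stand-in, not the kernel value */
    #define __pa_model(x) ((unsigned long)(x) - PAGE_OFFSET_MODEL)

    int main(void)
    {
        unsigned long virt = 0xC1234567ul;  /* hypothetical kernel vaddr */

        printf("phys = 0x%08lx\n", __pa_model(virt));  /* 0x01234567 */
        return 0;
    }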
233 | |||
234 | static void __init collect_cpu_sig_on_bsp(void *arg) | ||
235 | { | ||
236 | unsigned int cpu = smp_processor_id(); | ||
237 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | ||
239 | } | ||
240 | #else | ||
241 | static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | ||
242 | struct ucode_cpu_info *uci) | ||
243 | { | ||
244 | u32 rev, eax; | ||
245 | |||
246 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | ||
247 | eax = cpuid_eax(0x00000001); | ||
248 | |||
249 | uci->cpu_sig.sig = eax; | ||
250 | uci->cpu_sig.rev = rev; | ||
251 | c->microcode = rev; | ||
252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
253 | } | ||
254 | |||
255 | void load_ucode_amd_ap(void) | ||
256 | { | ||
257 | unsigned int cpu = smp_processor_id(); | ||
258 | |||
259 | collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); | ||
260 | |||
261 | if (cpu && !ucode_loaded) { | ||
262 | void *ucode; | ||
263 | |||
264 | if (!ucode_size || !initrd_start) | ||
265 | return; | ||
266 | |||
267 | ucode = (void *)(initrd_start + ucode_offset); | ||
268 | if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) | ||
269 | return; | ||
270 | ucode_loaded = true; | ||
271 | } | ||
272 | |||
273 | apply_microcode_amd(cpu); | ||
274 | } | ||
275 | #endif | ||
276 | |||
277 | int __init save_microcode_in_initrd_amd(void) | ||
278 | { | ||
279 | enum ucode_state ret; | ||
280 | void *ucode; | ||
281 | #ifdef CONFIG_X86_32 | ||
282 | unsigned int bsp = boot_cpu_data.cpu_index; | ||
283 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | ||
284 | |||
285 | if (!uci->cpu_sig.sig) | ||
286 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); | ||
287 | #endif | ||
288 | if (ucode_new_rev) | ||
289 | pr_info("microcode: updated early to new patch_level=0x%08x\n", | ||
290 | ucode_new_rev); | ||
291 | |||
292 | if (ucode_loaded || !ucode_size || !initrd_start) | ||
293 | return 0; | ||
294 | |||
295 | ucode = (void *)(initrd_start + ucode_offset); | ||
296 | ret = load_microcode_amd(0, ucode, ucode_size); | ||
297 | if (ret != UCODE_OK) | ||
298 | return -EINVAL; | ||
299 | |||
300 | ucode_loaded = true; | ||
301 | return 0; | ||
302 | } | ||
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 22db92bbdf1a..15c987698b0f 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = { | |||
468 | .resume = mc_bp_resume, | 468 | .resume = mc_bp_resume, |
469 | }; | 469 | }; |
470 | 470 | ||
471 | static __cpuinit int | 471 | static int |
472 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | 472 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) |
473 | { | 473 | { |
474 | unsigned int cpu = (unsigned long)hcpu; | 474 | unsigned int cpu = (unsigned long)hcpu; |
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 833d51d6ee06..be7f8514f577 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <asm/microcode_intel.h> | 20 | #include <asm/microcode_intel.h> |
21 | #include <asm/microcode_amd.h> | ||
21 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
22 | 23 | ||
23 | #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) | 24 | #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) |
@@ -40,7 +41,7 @@ | |||
40 | * | 41 | * |
41 | * x86_vendor() gets vendor information directly through cpuid. | 42 | * x86_vendor() gets vendor information directly through cpuid. |
42 | */ | 43 | */ |
43 | static int __cpuinit x86_vendor(void) | 44 | static int x86_vendor(void) |
44 | { | 45 | { |
45 | u32 eax = 0x00000000; | 46 | u32 eax = 0x00000000; |
46 | u32 ebx, ecx = 0, edx; | 47 | u32 ebx, ecx = 0, edx; |
@@ -56,7 +57,7 @@ static int __cpuinit x86_vendor(void) | |||
56 | return X86_VENDOR_UNKNOWN; | 57 | return X86_VENDOR_UNKNOWN; |
57 | } | 58 | } |
58 | 59 | ||
59 | static int __cpuinit x86_family(void) | 60 | static int x86_family(void) |
60 | { | 61 | { |
61 | u32 eax = 0x00000001; | 62 | u32 eax = 0x00000001; |
62 | u32 ebx, ecx = 0, edx; | 63 | u32 ebx, ecx = 0, edx; |
@@ -81,11 +82,21 @@ void __init load_ucode_bsp(void) | |||
81 | vendor = x86_vendor(); | 82 | vendor = x86_vendor(); |
82 | x86 = x86_family(); | 83 | x86 = x86_family(); |
83 | 84 | ||
84 | if (vendor == X86_VENDOR_INTEL && x86 >= 6) | 85 | switch (vendor) { |
85 | load_ucode_intel_bsp(); | 86 | case X86_VENDOR_INTEL: |
87 | if (x86 >= 6) | ||
88 | load_ucode_intel_bsp(); | ||
89 | break; | ||
90 | case X86_VENDOR_AMD: | ||
91 | if (x86 >= 0x10) | ||
92 | load_ucode_amd_bsp(); | ||
93 | break; | ||
94 | default: | ||
95 | break; | ||
96 | } | ||
86 | } | 97 | } |
87 | 98 | ||
88 | void __cpuinit load_ucode_ap(void) | 99 | void load_ucode_ap(void) |
89 | { | 100 | { |
90 | int vendor, x86; | 101 | int vendor, x86; |
91 | 102 | ||
@@ -95,6 +106,36 @@ void __cpuinit load_ucode_ap(void) | |||
95 | vendor = x86_vendor(); | 106 | vendor = x86_vendor(); |
96 | x86 = x86_family(); | 107 | x86 = x86_family(); |
97 | 108 | ||
98 | if (vendor == X86_VENDOR_INTEL && x86 >= 6) | 109 | switch (vendor) { |
99 | load_ucode_intel_ap(); | 110 | case X86_VENDOR_INTEL: |
111 | if (x86 >= 6) | ||
112 | load_ucode_intel_ap(); | ||
113 | break; | ||
114 | case X86_VENDOR_AMD: | ||
115 | if (x86 >= 0x10) | ||
116 | load_ucode_amd_ap(); | ||
117 | break; | ||
118 | default: | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | int __init save_microcode_in_initrd(void) | ||
124 | { | ||
125 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
126 | |||
127 | switch (c->x86_vendor) { | ||
128 | case X86_VENDOR_INTEL: | ||
129 | if (c->x86 >= 6) | ||
130 | save_microcode_in_initrd_intel(); | ||
131 | break; | ||
132 | case X86_VENDOR_AMD: | ||
133 | if (c->x86 >= 0x10) | ||
134 | save_microcode_in_initrd_amd(); | ||
135 | break; | ||
136 | default: | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | return 0; | ||
100 | } | 141 | } |
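A userspace sketch of the dispatch above, reading the vendor string and family via CPUID with the compiler-provided <cpuid.h>. The family math mirrors x86_family() (extended family bits count only when the base field saturates at 0xf), and the >= 6 / >= 0x10 thresholds are taken straight from this hunk:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    static int family_from_cpuid(void)
    {
        unsigned int eax, ebx, ecx, edx;
        int fam;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return -1;
        fam = (eax >> 8) & 0xf;
        if (fam == 0xf)
            fam += (eax >> 20) & 0xff;
        return fam;
    }

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13] = { 0 };
        int fam;

        __get_cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);  /* vendor string is EBX,EDX,ECX */
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        fam = family_from_cpuid();

        if (!strcmp(vendor, "GenuineIntel") && fam >= 6)
            puts("would take the load_ucode_intel_bsp() branch");
        else if (!strcmp(vendor, "AuthenticAMD") && fam >= 0x10)
            puts("would take the load_ucode_amd_bsp() branch");
        else
            puts("no early loader for this CPU");
        return 0;
    }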
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 2e9e12871c2b..1575deb2e636 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c | |||
@@ -34,7 +34,7 @@ struct mc_saved_data { | |||
34 | struct microcode_intel **mc_saved; | 34 | struct microcode_intel **mc_saved; |
35 | } mc_saved_data; | 35 | } mc_saved_data; |
36 | 36 | ||
37 | static enum ucode_state __cpuinit | 37 | static enum ucode_state |
38 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, | 38 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, |
39 | unsigned int mc_saved_count, | 39 | unsigned int mc_saved_count, |
40 | struct ucode_cpu_info *uci) | 40 | struct ucode_cpu_info *uci) |
@@ -69,7 +69,7 @@ out: | |||
69 | return state; | 69 | return state; |
70 | } | 70 | } |
71 | 71 | ||
72 | static void __cpuinit | 72 | static void |
73 | microcode_pointer(struct microcode_intel **mc_saved, | 73 | microcode_pointer(struct microcode_intel **mc_saved, |
74 | unsigned long *mc_saved_in_initrd, | 74 | unsigned long *mc_saved_in_initrd, |
75 | unsigned long initrd_start, int mc_saved_count) | 75 | unsigned long initrd_start, int mc_saved_count) |
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved, | |||
82 | } | 82 | } |
83 | 83 | ||
84 | #ifdef CONFIG_X86_32 | 84 | #ifdef CONFIG_X86_32 |
85 | static void __cpuinit | 85 | static void |
86 | microcode_phys(struct microcode_intel **mc_saved_tmp, | 86 | microcode_phys(struct microcode_intel **mc_saved_tmp, |
87 | struct mc_saved_data *mc_saved_data) | 87 | struct mc_saved_data *mc_saved_data) |
88 | { | 88 | { |
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, | |||
101 | } | 101 | } |
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | static enum ucode_state __cpuinit | 104 | static enum ucode_state |
105 | load_microcode(struct mc_saved_data *mc_saved_data, | 105 | load_microcode(struct mc_saved_data *mc_saved_data, |
106 | unsigned long *mc_saved_in_initrd, | 106 | unsigned long *mc_saved_in_initrd, |
107 | unsigned long initrd_start, | 107 | unsigned long initrd_start, |
@@ -375,7 +375,7 @@ do { \ | |||
375 | #define native_wrmsr(msr, low, high) \ | 375 | #define native_wrmsr(msr, low, high) \ |
376 | native_write_msr(msr, low, high); | 376 | native_write_msr(msr, low, high); |
377 | 377 | ||
378 | static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci) | 378 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) |
379 | { | 379 | { |
380 | unsigned int val[2]; | 380 | unsigned int val[2]; |
381 | u8 x86, x86_model; | 381 | u8 x86, x86_model; |
@@ -529,7 +529,7 @@ int save_mc_for_early(u8 *mc) | |||
529 | */ | 529 | */ |
530 | ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count); | 530 | ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count); |
531 | if (ret) { | 531 | if (ret) { |
532 | pr_err("Can not save microcode patch.\n"); | 532 | pr_err("Cannot save microcode patch.\n"); |
533 | goto out; | 533 | goto out; |
534 | } | 534 | } |
535 | 535 | ||
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end, | |||
584 | /* | 584 | /* |
585 | * Print ucode update info. | 585 | * Print ucode update info. |
586 | */ | 586 | */ |
587 | static void __cpuinit | 587 | static void |
588 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) | 588 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) |
589 | { | 589 | { |
590 | int cpu = smp_processor_id(); | 590 | int cpu = smp_processor_id(); |
@@ -605,7 +605,7 @@ static int current_mc_date; | |||
605 | /* | 605 | /* |
606 | * Print early updated ucode info after printk works. This is a delayed info dump. | 606 | * Print early updated ucode info after printk works. This is a delayed info dump. |
607 | */ | 607 | */ |
608 | void __cpuinit show_ucode_info_early(void) | 608 | void show_ucode_info_early(void) |
609 | { | 609 | { |
610 | struct ucode_cpu_info uci; | 610 | struct ucode_cpu_info uci; |
611 | 611 | ||
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void) | |||
621 | * mc_saved_data.mc_saved and delay printing microcode info in | 621 | * mc_saved_data.mc_saved and delay printing microcode info in |
622 | * show_ucode_info_early() until printk() works. | 622 | * show_ucode_info_early() until printk() works. |
623 | */ | 623 | */ |
624 | static void __cpuinit print_ucode(struct ucode_cpu_info *uci) | 624 | static void print_ucode(struct ucode_cpu_info *uci) |
625 | { | 625 | { |
626 | struct microcode_intel *mc_intel; | 626 | struct microcode_intel *mc_intel; |
627 | int *delay_ucode_info_p; | 627 | int *delay_ucode_info_p; |
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
643 | * Flush global tlb. We only do this in x86_64 where paging has been enabled | 643 | * Flush global tlb. We only do this in x86_64 where paging has been enabled |
644 | * already and PGE should be enabled as well. | 644 | * already and PGE should be enabled as well. |
645 | */ | 645 | */ |
646 | static inline void __cpuinit flush_tlb_early(void) | 646 | static inline void flush_tlb_early(void) |
647 | { | 647 | { |
648 | __native_flush_tlb_global_irq_disabled(); | 648 | __native_flush_tlb_global_irq_disabled(); |
649 | } | 649 | } |
650 | 650 | ||
651 | static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) | 651 | static inline void print_ucode(struct ucode_cpu_info *uci) |
652 | { | 652 | { |
653 | struct microcode_intel *mc_intel; | 653 | struct microcode_intel *mc_intel; |
654 | 654 | ||
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
660 | } | 660 | } |
661 | #endif | 661 | #endif |
662 | 662 | ||
663 | static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, | 663 | static int apply_microcode_early(struct mc_saved_data *mc_saved_data, |
664 | struct ucode_cpu_info *uci) | 664 | struct ucode_cpu_info *uci) |
665 | { | 665 | { |
666 | struct microcode_intel *mc_intel; | 666 | struct microcode_intel *mc_intel; |
667 | unsigned int val[2]; | 667 | unsigned int val[2]; |
@@ -699,7 +699,7 @@ static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, | |||
699 | * This function converts microcode patch offsets previously stored in | 699 | * This function converts microcode patch offsets previously stored in |
700 | * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data. | 700 | * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data. |
701 | */ | 701 | */ |
702 | int __init save_microcode_in_initrd(void) | 702 | int __init save_microcode_in_initrd_intel(void) |
703 | { | 703 | { |
704 | unsigned int count = mc_saved_data.mc_saved_count; | 704 | unsigned int count = mc_saved_data.mc_saved_count; |
705 | struct microcode_intel *mc_saved[MAX_UCODE_COUNT]; | 705 | struct microcode_intel *mc_saved[MAX_UCODE_COUNT]; |
@@ -711,7 +711,7 @@ int __init save_microcode_in_initrd(void) | |||
711 | microcode_pointer(mc_saved, mc_saved_in_initrd, initrd_start, count); | 711 | microcode_pointer(mc_saved, mc_saved_in_initrd, initrd_start, count); |
712 | ret = save_microcode(&mc_saved_data, mc_saved, count); | 712 | ret = save_microcode(&mc_saved_data, mc_saved, count); |
713 | if (ret) | 713 | if (ret) |
714 | pr_err("Can not save microcod patches from initrd"); | 714 | pr_err("Cannot save microcode patches from initrd.\n"); |
715 | 715 | ||
716 | show_saved_mc(); | 716 | show_saved_mc(); |
717 | 717 | ||
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void) | |||
763 | #endif | 763 | #endif |
764 | } | 764 | } |
765 | 765 | ||
766 | void __cpuinit load_ucode_intel_ap(void) | 766 | void load_ucode_intel_ap(void) |
767 | { | 767 | { |
768 | struct mc_saved_data *mc_saved_data_p; | 768 | struct mc_saved_data *mc_saved_data_p; |
769 | struct ucode_cpu_info uci; | 769 | struct ucode_cpu_info uci; |
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index ac861b8348e2..f4c886d9165c 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe { | |||
24 | u32 device; | 24 | u32 device; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | static u64 __cpuinitdata fam10h_pci_mmconf_base; | 27 | static u64 fam10h_pci_mmconf_base; |
28 | 28 | ||
29 | static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { | 29 | static struct pci_hostbridge_probe pci_probes[] = { |
30 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, | 30 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, |
31 | { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, | 31 | { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | static int __cpuinit cmp_range(const void *x1, const void *x2) | 34 | static int cmp_range(const void *x1, const void *x2) |
35 | { | 35 | { |
36 | const struct range *r1 = x1; | 36 | const struct range *r1 = x1; |
37 | const struct range *r2 = x2; | 37 | const struct range *r2 = x2; |
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2) | |||
49 | /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ | 49 | /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ |
50 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) | 50 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) |
51 | #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) | 51 | #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) |
52 | static void __cpuinit get_fam10h_pci_mmconf_base(void) | 52 | static void get_fam10h_pci_mmconf_base(void) |
53 | { | 53 | { |
54 | int i; | 54 | int i; |
55 | unsigned bus; | 55 | unsigned bus; |
@@ -166,7 +166,7 @@ out: | |||
166 | fam10h_pci_mmconf_base = base; | 166 | fam10h_pci_mmconf_base = base; |
167 | } | 167 | } |
168 | 168 | ||
169 | void __cpuinit fam10h_check_enable_mmcfg(void) | 169 | void fam10h_check_enable_mmcfg(void) |
170 | { | 170 | { |
171 | u64 val; | 171 | u64 val; |
172 | u32 address; | 172 | u32 address; |
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = { | |||
230 | {} | 230 | {} |
231 | }; | 231 | }; |
232 | 232 | ||
233 | /* Called from a __cpuinit function, but only on the BSP. */ | 233 | /* Called from a non-__init function, but only on the BSP. */ |
234 | void __ref check_enable_amd_mmconf_dmi(void) | 234 | void __ref check_enable_amd_mmconf_dmi(void) |
235 | { | 235 | { |
236 | dmi_check_system(mmconf_dmi_table); | 236 | dmi_check_system(mmconf_dmi_table); |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index ce130493b802..88458faea2f8 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = { | |||
200 | .compat_ioctl = msr_ioctl, | 200 | .compat_ioctl = msr_ioctl, |
201 | }; | 201 | }; |
202 | 202 | ||
203 | static int __cpuinit msr_device_create(int cpu) | 203 | static int msr_device_create(int cpu) |
204 | { | 204 | { |
205 | struct device *dev; | 205 | struct device *dev; |
206 | 206 | ||
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu) | |||
214 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); | 214 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); |
215 | } | 215 | } |
216 | 216 | ||
217 | static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, | 217 | static int msr_class_cpu_callback(struct notifier_block *nfb, |
218 | unsigned long action, void *hcpu) | 218 | unsigned long action, void *hcpu) |
219 | { | 219 | { |
220 | unsigned int cpu = (unsigned long)hcpu; | 220 | unsigned int cpu = (unsigned long)hcpu; |
221 | int err = 0; | 221 | int err = 0; |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 60308053fdb2..ba77ebc2c353 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/kprobes.h> | 14 | #include <linux/kprobes.h> |
15 | #include <linux/kdebug.h> | 15 | #include <linux/kdebug.h> |
16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
17 | #include <linux/debugfs.h> | ||
17 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
18 | #include <linux/hardirq.h> | 19 | #include <linux/hardirq.h> |
19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
@@ -29,6 +30,9 @@ | |||
29 | #include <asm/nmi.h> | 30 | #include <asm/nmi.h> |
30 | #include <asm/x86_init.h> | 31 | #include <asm/x86_init.h> |
31 | 32 | ||
33 | #define CREATE_TRACE_POINTS | ||
34 | #include <trace/events/nmi.h> | ||
35 | |||
32 | struct nmi_desc { | 36 | struct nmi_desc { |
33 | spinlock_t lock; | 37 | spinlock_t lock; |
34 | struct list_head head; | 38 | struct list_head head; |
@@ -82,6 +86,15 @@ __setup("unknown_nmi_panic", setup_unknown_nmi_panic); | |||
82 | 86 | ||
83 | #define nmi_to_desc(type) (&nmi_desc[type]) | 87 | #define nmi_to_desc(type) (&nmi_desc[type]) |
84 | 88 | ||
89 | static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC; | ||
90 | static int __init nmi_warning_debugfs(void) | ||
91 | { | ||
92 | debugfs_create_u64("nmi_longest_ns", 0644, | ||
93 | arch_debugfs_dir, &nmi_longest_ns); | ||
94 | return 0; | ||
95 | } | ||
96 | fs_initcall(nmi_warning_debugfs); | ||
97 | |||
85 | static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) | 98 | static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) |
86 | { | 99 | { |
87 | struct nmi_desc *desc = nmi_to_desc(type); | 100 | struct nmi_desc *desc = nmi_to_desc(type); |
@@ -96,8 +109,28 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 | |||
96 | * can be latched at any given time. Walk the whole list | 109 | * can be latched at any given time. Walk the whole list |
97 | * to handle those situations. | 110 | * to handle those situations. |
98 | */ | 111 | */ |
99 | list_for_each_entry_rcu(a, &desc->head, list) | 112 | list_for_each_entry_rcu(a, &desc->head, list) { |
100 | handled += a->handler(type, regs); | 113 | u64 before, delta, whole_msecs; |
114 | int remainder_ns, decimal_msecs, thishandled; | ||
115 | |||
116 | before = local_clock(); | ||
117 | thishandled = a->handler(type, regs); | ||
118 | handled += thishandled; | ||
119 | delta = local_clock() - before; | ||
120 | trace_nmi_handler(a->handler, (int)delta, thishandled); | ||
121 | |||
122 | if (delta < nmi_longest_ns) | ||
123 | continue; | ||
124 | |||
125 | nmi_longest_ns = delta; | ||
126 | whole_msecs = delta; | ||
127 | remainder_ns = do_div(whole_msecs, (1000 * 1000)); | ||
128 | decimal_msecs = remainder_ns / 1000; | ||
129 | printk_ratelimited(KERN_INFO | ||
130 | "INFO: NMI handler (%ps) took too long to run: " | ||
131 | "%lld.%03d msecs\n", a->handler, whole_msecs, | ||
132 | decimal_msecs); | ||
133 | } | ||
101 | 134 | ||
102 | rcu_read_unlock(); | 135 | rcu_read_unlock(); |
103 | 136 | ||
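The new timing code above splits a nanosecond delta into whole and fractional milliseconds with do_div(), which divides its u64 argument in place and returns the remainder. A compilable model of just that arithmetic; do_div_model is a stand-in using a GNU statement expression, not the kernel macro:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Stand-in with do_div()'s contract: divide the u64 in place (the
     * quotient stays in n) and hand back the remainder. */
    #define do_div_model(n, base) \
        ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

    int main(void)
    {
        uint64_t delta = 2345678;  /* measured handler time, in ns */
        uint64_t whole_msecs = delta;
        uint32_t remainder_ns, decimal_msecs;

        remainder_ns = do_div_model(whole_msecs, 1000 * 1000);
        decimal_msecs = remainder_ns / 1000;
        printf("took %" PRIu64 ".%03u msecs\n", whole_msecs, decimal_msecs);
        return 0;  /* prints "took 2.345 msecs" */
    }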
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 81a5f5e8f142..83369e5a1d27 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -398,7 +398,7 @@ static void amd_e400_idle(void) | |||
398 | default_idle(); | 398 | default_idle(); |
399 | } | 399 | } |
400 | 400 | ||
401 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | 401 | void select_idle_routine(const struct cpuinfo_x86 *c) |
402 | { | 402 | { |
403 | #ifdef CONFIG_SMP | 403 | #ifdef CONFIG_SMP |
404 | if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) | 404 | if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 7305f7dfc7ab..f8adefca71dc 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -110,11 +110,16 @@ void __show_regs(struct pt_regs *regs, int all) | |||
110 | get_debugreg(d1, 1); | 110 | get_debugreg(d1, 1); |
111 | get_debugreg(d2, 2); | 111 | get_debugreg(d2, 2); |
112 | get_debugreg(d3, 3); | 112 | get_debugreg(d3, 3); |
113 | printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", | ||
114 | d0, d1, d2, d3); | ||
115 | |||
116 | get_debugreg(d6, 6); | 113 | get_debugreg(d6, 6); |
117 | get_debugreg(d7, 7); | 114 | get_debugreg(d7, 7); |
115 | |||
116 | /* Only print out debug registers if they are in their non-default state. */ | ||
117 | if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && | ||
118 | (d6 == DR6_RESERVED) && (d7 == 0x400)) | ||
119 | return; | ||
120 | |||
121 | printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", | ||
122 | d0, d1, d2, d3); | ||
118 | printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", | 123 | printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", |
119 | d6, d7); | 124 | d6, d7); |
120 | } | 125 | } |
@@ -147,7 +152,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
147 | childregs->bp = arg; | 152 | childregs->bp = arg; |
148 | childregs->orig_ax = -1; | 153 | childregs->orig_ax = -1; |
149 | childregs->cs = __KERNEL_CS | get_kernel_rpl(); | 154 | childregs->cs = __KERNEL_CS | get_kernel_rpl(); |
150 | childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; | 155 | childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; |
151 | p->fpu_counter = 0; | 156 | p->fpu_counter = 0; |
152 | p->thread.io_bitmap_ptr = NULL; | 157 | p->thread.io_bitmap_ptr = NULL; |
153 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); | 158 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); |
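The __show_regs() change above treats DR0-DR3 == 0, DR6 == DR6_RESERVED and DR7 == 0x400 as "debug registers untouched" and skips the printout entirely. A small model of that predicate, assuming DR6_RESERVED is 0xFFFF0FF0 as in the x86 debugreg header:

    #include <stdbool.h>
    #include <stdio.h>

    #define DR6_RESERVED 0xFFFF0FF0ul  /* assumed from asm/debugreg.h */

    static bool debug_regs_are_default(unsigned long d0, unsigned long d1,
                                       unsigned long d2, unsigned long d3,
                                       unsigned long d6, unsigned long d7)
    {
        return d0 == 0 && d1 == 0 && d2 == 0 && d3 == 0 &&
               d6 == DR6_RESERVED && d7 == 0x400;
    }

    int main(void)
    {
        /* A freshly reset CPU: nothing would be printed. */
        printf("%d\n", debug_regs_are_default(0, 0, 0, 0, DR6_RESERVED, 0x400));
        return 0;
    }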
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 355ae06dbf94..05646bab4ca6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -105,11 +105,18 @@ void __show_regs(struct pt_regs *regs, int all) | |||
105 | get_debugreg(d0, 0); | 105 | get_debugreg(d0, 0); |
106 | get_debugreg(d1, 1); | 106 | get_debugreg(d1, 1); |
107 | get_debugreg(d2, 2); | 107 | get_debugreg(d2, 2); |
108 | printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); | ||
109 | get_debugreg(d3, 3); | 108 | get_debugreg(d3, 3); |
110 | get_debugreg(d6, 6); | 109 | get_debugreg(d6, 6); |
111 | get_debugreg(d7, 7); | 110 | get_debugreg(d7, 7); |
111 | |||
112 | /* Only print out debug registers if they are in their non-default state. */ | ||
113 | if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && | ||
114 | (d6 == DR6_RESERVED) && (d7 == 0x400)) | ||
115 | return; | ||
116 | |||
117 | printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); | ||
112 | printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); | 118 | printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); |
119 | |||
113 | } | 120 | } |
114 | 121 | ||
115 | void release_thread(struct task_struct *dead_task) | 122 | void release_thread(struct task_struct *dead_task) |
@@ -176,7 +183,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
176 | childregs->bp = arg; | 183 | childregs->bp = arg; |
177 | childregs->orig_ax = -1; | 184 | childregs->orig_ax = -1; |
178 | childregs->cs = __KERNEL_CS | get_kernel_rpl(); | 185 | childregs->cs = __KERNEL_CS | get_kernel_rpl(); |
179 | childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; | 186 | childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; |
180 | return 0; | 187 | return 0; |
181 | } | 188 | } |
182 | *childregs = *current_pt_regs(); | 189 | *childregs = *current_pt_regs(); |
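X86_EFLAGS_FIXED is the new name for the always-one reserved bit 1 that X86_EFLAGS_BIT1 used to denote; the rename makes the kernel-thread EFLAGS initializer in both copy_thread() hunks self-describing. A sketch with the flag values assumed from the x86 processor-flags header:

    #include <stdio.h>

    #define X86_EFLAGS_FIXED 0x00000002u  /* bit 1: reserved, always set */
    #define X86_EFLAGS_IF    0x00000200u  /* interrupts enabled */

    int main(void)
    {
        unsigned int flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;

        printf("initial eflags = 0x%08x\n", flags);  /* 0x00000202 */
        return 0;
    }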
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 29a8120e6fe8..7461f50d5bb1 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -601,30 +601,48 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) | |||
601 | return dr7; | 601 | return dr7; |
602 | } | 602 | } |
603 | 603 | ||
604 | static int | 604 | static int ptrace_fill_bp_fields(struct perf_event_attr *attr, |
605 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | 605 | int len, int type, bool disabled) |
606 | struct task_struct *tsk, int disabled) | 606 | { |
607 | int err, bp_len, bp_type; | ||
608 | |||
609 | err = arch_bp_generic_fields(len, type, &bp_len, &bp_type); | ||
610 | if (!err) { | ||
611 | attr->bp_len = bp_len; | ||
612 | attr->bp_type = bp_type; | ||
613 | attr->disabled = disabled; | ||
614 | } | ||
615 | |||
616 | return err; | ||
617 | } | ||
618 | |||
619 | static struct perf_event * | ||
620 | ptrace_register_breakpoint(struct task_struct *tsk, int len, int type, | ||
621 | unsigned long addr, bool disabled) | ||
607 | { | 622 | { |
608 | int err; | ||
609 | int gen_len, gen_type; | ||
610 | struct perf_event_attr attr; | 623 | struct perf_event_attr attr; |
624 | int err; | ||
611 | 625 | ||
612 | /* | 626 | ptrace_breakpoint_init(&attr); |
613 | * We should have at least an inactive breakpoint at this | 627 | attr.bp_addr = addr; |
614 | * slot. It means the user is writing dr7 without having | ||
615 | * written the address register first | ||
616 | */ | ||
617 | if (!bp) | ||
618 | return -EINVAL; | ||
619 | 628 | ||
620 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); | 629 | err = ptrace_fill_bp_fields(&attr, len, type, disabled); |
621 | if (err) | 630 | if (err) |
622 | return err; | 631 | return ERR_PTR(err); |
632 | |||
633 | return register_user_hw_breakpoint(&attr, ptrace_triggered, | ||
634 | NULL, tsk); | ||
635 | } | ||
623 | 636 | ||
624 | attr = bp->attr; | 637 | static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, |
625 | attr.bp_len = gen_len; | 638 | int disabled) |
626 | attr.bp_type = gen_type; | 639 | { |
627 | attr.disabled = disabled; | 640 | struct perf_event_attr attr = bp->attr; |
641 | int err; | ||
642 | |||
643 | err = ptrace_fill_bp_fields(&attr, len, type, disabled); | ||
644 | if (err) | ||
645 | return err; | ||
628 | 646 | ||
629 | return modify_user_hw_breakpoint(bp, &attr); | 647 | return modify_user_hw_breakpoint(bp, &attr); |
630 | } | 648 | } |
@@ -634,67 +652,50 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | |||
634 | */ | 652 | */ |
635 | static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) | 653 | static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) |
636 | { | 654 | { |
637 | struct thread_struct *thread = &(tsk->thread); | 655 | struct thread_struct *thread = &tsk->thread; |
638 | unsigned long old_dr7; | 656 | unsigned long old_dr7; |
639 | int i, orig_ret = 0, rc = 0; | 657 | bool second_pass = false; |
640 | int enabled, second_pass = 0; | 658 | int i, rc, ret = 0; |
641 | unsigned len, type; | ||
642 | struct perf_event *bp; | ||
643 | |||
644 | if (ptrace_get_breakpoints(tsk) < 0) | ||
645 | return -ESRCH; | ||
646 | 659 | ||
647 | data &= ~DR_CONTROL_RESERVED; | 660 | data &= ~DR_CONTROL_RESERVED; |
648 | old_dr7 = ptrace_get_dr7(thread->ptrace_bps); | 661 | old_dr7 = ptrace_get_dr7(thread->ptrace_bps); |
662 | |||
649 | restore: | 663 | restore: |
650 | /* | 664 | rc = 0; |
651 | * Loop through all the hardware breakpoints, making the | ||
652 | * appropriate changes to each. | ||
653 | */ | ||
654 | for (i = 0; i < HBP_NUM; i++) { | 665 | for (i = 0; i < HBP_NUM; i++) { |
655 | enabled = decode_dr7(data, i, &len, &type); | 666 | unsigned len, type; |
656 | bp = thread->ptrace_bps[i]; | 667 | bool disabled = !decode_dr7(data, i, &len, &type); |
657 | 668 | struct perf_event *bp = thread->ptrace_bps[i]; | |
658 | if (!enabled) { | 669 | |
659 | if (bp) { | 670 | if (!bp) { |
660 | /* | 671 | if (disabled) |
661 | * Don't unregister the breakpoints right-away, | 672 | continue; |
662 | * unless all register_user_hw_breakpoint() | 673 | |
663 | * requests have succeeded. This prevents | 674 | bp = ptrace_register_breakpoint(tsk, |
664 | * any window of opportunity for debug | 675 | len, type, 0, disabled); |
665 | * register grabbing by other users. | 676 | if (IS_ERR(bp)) { |
666 | */ | 677 | rc = PTR_ERR(bp); |
667 | if (!second_pass) | 678 | break; |
668 | continue; | ||
669 | |||
670 | rc = ptrace_modify_breakpoint(bp, len, type, | ||
671 | tsk, 1); | ||
672 | if (rc) | ||
673 | break; | ||
674 | } | 679 | } |
680 | |||
681 | thread->ptrace_bps[i] = bp; | ||
675 | continue; | 682 | continue; |
676 | } | 683 | } |
677 | 684 | ||
678 | rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); | 685 | rc = ptrace_modify_breakpoint(bp, len, type, disabled); |
679 | if (rc) | 686 | if (rc) |
680 | break; | 687 | break; |
681 | } | 688 | } |
682 | /* | 689 | |
683 | * Make a second pass to free the remaining unused breakpoints | 690 | /* Restore if the first pass failed, second_pass shouldn't fail. */ |
684 | * or to restore the original breakpoints if an error occurred. | 691 | if (rc && !WARN_ON(second_pass)) { |
685 | */ | 692 | ret = rc; |
686 | if (!second_pass) { | 693 | data = old_dr7; |
687 | second_pass = 1; | 694 | second_pass = true; |
688 | if (rc < 0) { | ||
689 | orig_ret = rc; | ||
690 | data = old_dr7; | ||
691 | } | ||
692 | goto restore; | 695 | goto restore; |
693 | } | 696 | } |
694 | 697 | ||
695 | ptrace_put_breakpoints(tsk); | 698 | return ret; |
696 | |||
697 | return ((orig_ret < 0) ? orig_ret : rc); | ||
698 | } | 699 | } |
699 | 700 | ||
700 | /* | 701 | /* |
@@ -702,25 +703,17 @@ restore: | |||
702 | */ | 703 | */ |
703 | static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) | 704 | static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) |
704 | { | 705 | { |
705 | struct thread_struct *thread = &(tsk->thread); | 706 | struct thread_struct *thread = &tsk->thread; |
706 | unsigned long val = 0; | 707 | unsigned long val = 0; |
707 | 708 | ||
708 | if (n < HBP_NUM) { | 709 | if (n < HBP_NUM) { |
709 | struct perf_event *bp; | 710 | struct perf_event *bp = thread->ptrace_bps[n]; |
710 | 711 | ||
711 | if (ptrace_get_breakpoints(tsk) < 0) | 712 | if (bp) |
712 | return -ESRCH; | ||
713 | |||
714 | bp = thread->ptrace_bps[n]; | ||
715 | if (!bp) | ||
716 | val = 0; | ||
717 | else | ||
718 | val = bp->hw.info.address; | 713 | val = bp->hw.info.address; |
719 | |||
720 | ptrace_put_breakpoints(tsk); | ||
721 | } else if (n == 6) { | 714 | } else if (n == 6) { |
722 | val = thread->debugreg6; | 715 | val = thread->debugreg6; |
723 | } else if (n == 7) { | 716 | } else if (n == 7) { |
724 | val = thread->ptrace_dr7; | 717 | val = thread->ptrace_dr7; |
725 | } | 718 | } |
726 | return val; | 719 | return val; |
@@ -729,29 +722,14 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) | |||
729 | static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | 722 | static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, |
730 | unsigned long addr) | 723 | unsigned long addr) |
731 | { | 724 | { |
732 | struct perf_event *bp; | ||
733 | struct thread_struct *t = &tsk->thread; | 725 | struct thread_struct *t = &tsk->thread; |
734 | struct perf_event_attr attr; | 726 | struct perf_event *bp = t->ptrace_bps[nr]; |
735 | int err = 0; | 727 | int err = 0; |
736 | 728 | ||
737 | if (ptrace_get_breakpoints(tsk) < 0) | 729 | if (!bp) { |
738 | return -ESRCH; | ||
739 | |||
740 | if (!t->ptrace_bps[nr]) { | ||
741 | ptrace_breakpoint_init(&attr); | ||
742 | /* | ||
743 | * Put stub len and type to register (reserve) an inactive but | ||
744 | * correct bp | ||
745 | */ | ||
746 | attr.bp_addr = addr; | ||
747 | attr.bp_len = HW_BREAKPOINT_LEN_1; | ||
748 | attr.bp_type = HW_BREAKPOINT_W; | ||
749 | attr.disabled = 1; | ||
750 | |||
751 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, | ||
752 | NULL, tsk); | ||
753 | |||
754 | /* | 730 | /* |
731 | * Put stub len and type to create an inactive but correct bp. | ||
732 | * | ||
755 | * CHECKME: the previous code returned -EIO if the addr wasn't | 733 | * CHECKME: the previous code returned -EIO if the addr wasn't |
756 | * a valid task virtual addr. The new one will return -EINVAL in | 734 | * a valid task virtual addr. The new one will return -EINVAL in |
757 | * this case. | 735 | * this case. |
@@ -760,22 +738,20 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
760 | * writing for the user. And anyway this is the previous | 738 | * writing for the user. And anyway this is the previous |
761 | * behaviour. | 739 | * behaviour. |
762 | */ | 740 | */ |
763 | if (IS_ERR(bp)) { | 741 | bp = ptrace_register_breakpoint(tsk, |
742 | X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE, | ||
743 | addr, true); | ||
744 | if (IS_ERR(bp)) | ||
764 | err = PTR_ERR(bp); | 745 | err = PTR_ERR(bp); |
765 | goto put; | 746 | else |
766 | } | 747 | t->ptrace_bps[nr] = bp; |
767 | |||
768 | t->ptrace_bps[nr] = bp; | ||
769 | } else { | 748 | } else { |
770 | bp = t->ptrace_bps[nr]; | 749 | struct perf_event_attr attr = bp->attr; |
771 | 750 | ||
772 | attr = bp->attr; | ||
773 | attr.bp_addr = addr; | 751 | attr.bp_addr = addr; |
774 | err = modify_user_hw_breakpoint(bp, &attr); | 752 | err = modify_user_hw_breakpoint(bp, &attr); |
775 | } | 753 | } |
776 | 754 | ||
777 | put: | ||
778 | ptrace_put_breakpoints(tsk); | ||
779 | return err; | 755 | return err; |
780 | } | 756 | } |
781 | 757 | ||
@@ -785,30 +761,20 @@ put: | |||
785 | static int ptrace_set_debugreg(struct task_struct *tsk, int n, | 761 | static int ptrace_set_debugreg(struct task_struct *tsk, int n, |
786 | unsigned long val) | 762 | unsigned long val) |
787 | { | 763 | { |
788 | struct thread_struct *thread = &(tsk->thread); | 764 | struct thread_struct *thread = &tsk->thread; |
789 | int rc = 0; | ||
790 | |||
791 | /* There are no DR4 or DR5 registers */ | 765 | /* There are no DR4 or DR5 registers */ |
792 | if (n == 4 || n == 5) | 766 | int rc = -EIO; |
793 | return -EIO; | ||
794 | 767 | ||
795 | if (n == 6) { | ||
796 | thread->debugreg6 = val; | ||
797 | goto ret_path; | ||
798 | } | ||
799 | if (n < HBP_NUM) { | 768 | if (n < HBP_NUM) { |
800 | rc = ptrace_set_breakpoint_addr(tsk, n, val); | 769 | rc = ptrace_set_breakpoint_addr(tsk, n, val); |
801 | if (rc) | 770 | } else if (n == 6) { |
802 | return rc; | 771 | thread->debugreg6 = val; |
803 | } | 772 | rc = 0; |
804 | /* All that's left is DR7 */ | 773 | } else if (n == 7) { |
805 | if (n == 7) { | ||
806 | rc = ptrace_write_dr7(tsk, val); | 774 | rc = ptrace_write_dr7(tsk, val); |
807 | if (!rc) | 775 | if (!rc) |
808 | thread->ptrace_dr7 = val; | 776 | thread->ptrace_dr7 = val; |
809 | } | 777 | } |
810 | |||
811 | ret_path: | ||
812 | return rc; | 778 | return rc; |
813 | } | 779 | } |
814 | 780 | ||
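The rewritten ptrace_write_dr7() applies the new per-slot settings in one loop and, if any slot fails, re-runs the same loop with the old DR7 value; the second (restore) pass is expected to succeed. A simplified userspace model of that two-pass structure, with apply_slot() standing in for register/modify_user_hw_breakpoint() and a contrived failure on slot 2:

    #include <stdbool.h>
    #include <stdio.h>

    #define HBP_NUM 4

    /* Stand-in for the breakpoint calls; pretend slot 2 rejects "enabled". */
    static int apply_slot(int slot, bool enabled)
    {
        if (enabled && slot == 2)
            return -1;
        return 0;
    }

    static int write_dr7_model(unsigned long data, unsigned long old)
    {
        bool second_pass = false;
        int i, rc, ret = 0;

    restore:
        rc = 0;
        for (i = 0; i < HBP_NUM; i++) {
            /* Two local/global enable bits per slot in DR7's low byte. */
            bool enabled = (data >> (i * 2)) & 3;

            rc = apply_slot(i, enabled);
            if (rc)
                break;
        }
        if (rc && !second_pass) {  /* first pass failed: restore old DR7 */
            ret = rc;
            data = old;
            second_pass = true;
            goto restore;
        }
        return ret;
    }

    int main(void)
    {
        /* Enabling all four slots fails on slot 2, then rolls back. */
        printf("ret=%d\n", write_dr7_model(0xFF, 0x00));  /* ret=-1 */
        return 0;
    }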
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 76fa1e9a2b39..563ed91e6faa 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -36,22 +36,6 @@ void (*pm_power_off)(void); | |||
36 | EXPORT_SYMBOL(pm_power_off); | 36 | EXPORT_SYMBOL(pm_power_off); |
37 | 37 | ||
38 | static const struct desc_ptr no_idt = {}; | 38 | static const struct desc_ptr no_idt = {}; |
39 | static int reboot_mode; | ||
40 | enum reboot_type reboot_type = BOOT_ACPI; | ||
41 | int reboot_force; | ||
42 | |||
43 | /* | ||
44 | * This variable is used privately to keep track of whether or not | ||
45 | * reboot_type is still set to its default value (i.e., reboot= hasn't | ||
46 | * been set on the command line). This is needed so that we can | ||
47 | * suppress DMI scanning for reboot quirks. Without it, it's | ||
48 | * impossible to override a faulty reboot quirk without recompiling. | ||
49 | */ | ||
50 | static int reboot_default = 1; | ||
51 | |||
52 | #ifdef CONFIG_SMP | ||
53 | static int reboot_cpu = -1; | ||
54 | #endif | ||
55 | 39 | ||
56 | /* | 40 | /* |
57 | * This is set if we need to go through the 'emergency' path. | 41 | * This is set if we need to go through the 'emergency' path. |
@@ -64,79 +48,6 @@ static int reboot_emergency; | |||
64 | bool port_cf9_safe = false; | 48 | bool port_cf9_safe = false; |
65 | 49 | ||
66 | /* | 50 | /* |
67 | * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] | ||
68 | * warm Don't set the cold reboot flag | ||
69 | * cold Set the cold reboot flag | ||
70 | * bios Reboot by jumping through the BIOS | ||
71 | * smp Reboot by executing reset on BSP or other CPU | ||
72 | * triple Force a triple fault (init) | ||
73 | * kbd Use the keyboard controller. cold reset (default) | ||
74 | * acpi Use the RESET_REG in the FADT | ||
75 | * efi Use efi reset_system runtime service | ||
76 | * pci Use the so-called "PCI reset register", CF9 | ||
77 | * force Avoid anything that could hang. | ||
78 | */ | ||
79 | static int __init reboot_setup(char *str) | ||
80 | { | ||
81 | for (;;) { | ||
82 | /* | ||
83 | * Having anything passed on the command line via | ||
84 | * reboot= will cause us to disable DMI checking | ||
85 | * below. | ||
86 | */ | ||
87 | reboot_default = 0; | ||
88 | |||
89 | switch (*str) { | ||
90 | case 'w': | ||
91 | reboot_mode = 0x1234; | ||
92 | break; | ||
93 | |||
94 | case 'c': | ||
95 | reboot_mode = 0; | ||
96 | break; | ||
97 | |||
98 | #ifdef CONFIG_SMP | ||
99 | case 's': | ||
100 | if (isdigit(*(str+1))) { | ||
101 | reboot_cpu = (int) (*(str+1) - '0'); | ||
102 | if (isdigit(*(str+2))) | ||
103 | reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0'); | ||
104 | } | ||
105 | /* | ||
106 | * We will leave sorting out the final value | ||
107 | * when we are ready to reboot, since we might not | ||
108 | * have detected BSP APIC ID or smp_num_cpu | ||
109 | */ | ||
110 | break; | ||
111 | #endif /* CONFIG_SMP */ | ||
112 | |||
113 | case 'b': | ||
114 | case 'a': | ||
115 | case 'k': | ||
116 | case 't': | ||
117 | case 'e': | ||
118 | case 'p': | ||
119 | reboot_type = *str; | ||
120 | break; | ||
121 | |||
122 | case 'f': | ||
123 | reboot_force = 1; | ||
124 | break; | ||
125 | } | ||
126 | |||
127 | str = strchr(str, ','); | ||
128 | if (str) | ||
129 | str++; | ||
130 | else | ||
131 | break; | ||
132 | } | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | __setup("reboot=", reboot_setup); | ||
137 | |||
138 | |||
139 | /* | ||
140 | * Reboot options and system auto-detection code provided by | 51 | * Reboot options and system auto-detection code provided by |
141 | * Dell Inc. so their systems "just work". :-) | 52 | * Dell Inc. so their systems "just work". :-) |
142 | */ | 53 | */ |
@@ -536,6 +447,7 @@ static void native_machine_emergency_restart(void) | |||
536 | int i; | 447 | int i; |
537 | int attempt = 0; | 448 | int attempt = 0; |
538 | int orig_reboot_type = reboot_type; | 449 | int orig_reboot_type = reboot_type; |
450 | unsigned short mode; | ||
539 | 451 | ||
540 | if (reboot_emergency) | 452 | if (reboot_emergency) |
541 | emergency_vmx_disable_all(); | 453 | emergency_vmx_disable_all(); |
@@ -543,7 +455,8 @@ static void native_machine_emergency_restart(void) | |||
543 | tboot_shutdown(TB_SHUTDOWN_REBOOT); | 455 | tboot_shutdown(TB_SHUTDOWN_REBOOT); |
544 | 456 | ||
545 | /* Tell the BIOS if we want cold or warm reboot */ | 457 | /* Tell the BIOS if we want cold or warm reboot */ |
546 | *((unsigned short *)__va(0x472)) = reboot_mode; | 458 | mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0; |
459 | *((unsigned short *)__va(0x472)) = mode; | ||
547 | 460 | ||
548 | for (;;) { | 461 | for (;;) { |
549 | /* Could also try the reset bit in the Hammer NB */ | 462 | /* Could also try the reset bit in the Hammer NB */ |
@@ -585,7 +498,7 @@ static void native_machine_emergency_restart(void) | |||
585 | 498 | ||
586 | case BOOT_EFI: | 499 | case BOOT_EFI: |
587 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | 500 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
588 | efi.reset_system(reboot_mode ? | 501 | efi.reset_system(reboot_mode == REBOOT_WARM ? |
589 | EFI_RESET_WARM : | 502 | EFI_RESET_WARM : |
590 | EFI_RESET_COLD, | 503 | EFI_RESET_COLD, |
591 | EFI_SUCCESS, 0, NULL); | 504 | EFI_SUCCESS, 0, NULL); |
@@ -614,26 +527,10 @@ void native_machine_shutdown(void) | |||
614 | { | 527 | { |
615 | /* Stop the cpus and apics */ | 528 | /* Stop the cpus and apics */ |
616 | #ifdef CONFIG_SMP | 529 | #ifdef CONFIG_SMP |
617 | |||
618 | /* The boot cpu is always logical cpu 0 */ | ||
619 | int reboot_cpu_id = 0; | ||
620 | |||
621 | /* See if there has been given a command line override */ | ||
622 | if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && | ||
623 | cpu_online(reboot_cpu)) | ||
624 | reboot_cpu_id = reboot_cpu; | ||
625 | |||
626 | /* Make certain the cpu I'm about to reboot on is online */ | ||
627 | if (!cpu_online(reboot_cpu_id)) | ||
628 | reboot_cpu_id = smp_processor_id(); | ||
629 | |||
630 | /* Make certain I only run on the appropriate processor */ | ||
631 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); | ||
632 | |||
633 | /* | 530 | /* |
634 | * O.K Now that I'm on the appropriate processor, stop all of the | 531 | * Stop all of the other CPUs. Also disable the local irq so |
635 | * others. Also disable the local irq to not receive the per-cpu | 532 | * we do not receive the per-cpu timer interrupt, which may |
636 | * timer interrupt which may trigger scheduler's load balance. | 533 | * trigger the scheduler's load balancing. |
637 | */ | 534 | */ |
638 | local_irq_disable(); | 535 | local_irq_disable(); |
639 | stop_other_cpus(); | 536 | stop_other_cpus(); |
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 36818f8ec2be..e13f8e7c22a6 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S | |||
@@ -186,7 +186,7 @@ identity_mapped: | |||
186 | movl CP_PA_PGD(%ebx), %eax | 186 | movl CP_PA_PGD(%ebx), %eax |
187 | movl %eax, %cr3 | 187 | movl %eax, %cr3 |
188 | movl %cr0, %eax | 188 | movl %cr0, %eax |
189 | orl $(1<<31), %eax | 189 | orl $X86_CR0_PG, %eax |
190 | movl %eax, %cr0 | 190 | movl %eax, %cr0 |
191 | lea PAGE_SIZE(%edi), %esp | 191 | lea PAGE_SIZE(%edi), %esp |
192 | movl %edi, %eax | 192 | movl %edi, %eax |
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index f2bb9c96720a..3fd2c693e475 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S | |||
@@ -151,21 +151,21 @@ identity_mapped: | |||
151 | 151 | ||
152 | testq %r11, %r11 | 152 | testq %r11, %r11 |
153 | jnz 1f | 153 | jnz 1f |
154 | xorq %rax, %rax | 154 | xorl %eax, %eax |
155 | xorq %rbx, %rbx | 155 | xorl %ebx, %ebx |
156 | xorq %rcx, %rcx | 156 | xorl %ecx, %ecx |
157 | xorq %rdx, %rdx | 157 | xorl %edx, %edx |
158 | xorq %rsi, %rsi | 158 | xorl %esi, %esi |
159 | xorq %rdi, %rdi | 159 | xorl %edi, %edi |
160 | xorq %rbp, %rbp | 160 | xorl %ebp, %ebp |
161 | xorq %r8, %r8 | 161 | xorl %r8d, %r8d |
162 | xorq %r9, %r9 | 162 | xorl %r9d, %r9d |
163 | xorq %r10, %r10 | 163 | xorl %r10d, %r10d |
164 | xorq %r11, %r11 | 164 | xorl %r11d, %r11d |
165 | xorq %r12, %r12 | 165 | xorl %r12d, %r12d |
166 | xorq %r13, %r13 | 166 | xorl %r13d, %r13d |
167 | xorq %r14, %r14 | 167 | xorl %r14d, %r14d |
168 | xorq %r15, %r15 | 168 | xorl %r15d, %r15d |
169 | 169 | ||
170 | ret | 170 | ret |
171 | 171 | ||
@@ -212,8 +212,8 @@ virtual_mapped: | |||
212 | /* Do the copies */ | 212 | /* Do the copies */ |
213 | swap_pages: | 213 | swap_pages: |
214 | movq %rdi, %rcx /* Put the page_list in %rcx */ | 214 | movq %rdi, %rcx /* Put the page_list in %rcx */ |
215 | xorq %rdi, %rdi | 215 | xorl %edi, %edi |
216 | xorq %rsi, %rsi | 216 | xorl %esi, %esi |
217 | jmp 1f | 217 | jmp 1f |
218 | 218 | ||
219 | 0: /* top, read another word for the indirection page */ | 219 | 0: /* top, read another word for the indirection page */ |
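The xorq-to-xorl change above works because any 32-bit ALU result on x86-64 zero-extends into bits 63:32, so the shorter encoding without the REX.W prefix still clears the whole register. A demonstration that only builds with GCC/Clang on x86-64:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t v = 0xdeadbeefcafef00dULL;

        /* %k0 forces the 32-bit register name; the 32-bit xor still
         * zeroes the full 64-bit register via zero-extension. */
        asm volatile("xorl %k0, %k0" : "+r"(v));
        printf("v = 0x%016" PRIx64 "\n", v);  /* all zeroes */
        return 0;
    }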
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 198eb201ed3b..0aa29394ed6f 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -38,8 +38,9 @@ EXPORT_SYMBOL(rtc_lock); | |||
38 | * jump to the next second precisely 500 ms later. Check the Motorola | 38 | * jump to the next second precisely 500 ms later. Check the Motorola |
39 | * MC146818A or Dallas DS12887 data sheet for details. | 39 | * MC146818A or Dallas DS12887 data sheet for details. |
40 | */ | 40 | */ |
41 | int mach_set_rtc_mmss(unsigned long nowtime) | 41 | int mach_set_rtc_mmss(const struct timespec *now) |
42 | { | 42 | { |
43 | unsigned long nowtime = now->tv_sec; | ||
43 | struct rtc_time tm; | 44 | struct rtc_time tm; |
44 | int retval = 0; | 45 | int retval = 0; |
45 | 46 | ||
@@ -58,7 +59,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
58 | return retval; | 59 | return retval; |
59 | } | 60 | } |
60 | 61 | ||
61 | unsigned long mach_get_cmos_time(void) | 62 | void mach_get_cmos_time(struct timespec *now) |
62 | { | 63 | { |
63 | unsigned int status, year, mon, day, hour, min, sec, century = 0; | 64 | unsigned int status, year, mon, day, hour, min, sec, century = 0; |
64 | unsigned long flags; | 65 | unsigned long flags; |
@@ -107,7 +108,8 @@ unsigned long mach_get_cmos_time(void) | |||
107 | } else | 108 | } else |
108 | year += CMOS_YEARS_OFFS; | 109 | year += CMOS_YEARS_OFFS; |
109 | 110 | ||
110 | return mktime(year, mon, day, hour, min, sec); | 111 | now->tv_sec = mktime(year, mon, day, hour, min, sec); |
112 | now->tv_nsec = 0; | ||
111 | } | 113 | } |
112 | 114 | ||
113 | /* Routines for accessing the CMOS RAM/RTC. */ | 115 | /* Routines for accessing the CMOS RAM/RTC. */ |
@@ -135,18 +137,13 @@ EXPORT_SYMBOL(rtc_cmos_write); | |||
135 | 137 | ||
136 | int update_persistent_clock(struct timespec now) | 138 | int update_persistent_clock(struct timespec now) |
137 | { | 139 | { |
138 | return x86_platform.set_wallclock(now.tv_sec); | 140 | return x86_platform.set_wallclock(&now); |
139 | } | 141 | } |
140 | 142 | ||
141 | /* not static: needed by APM */ | 143 | /* not static: needed by APM */ |
142 | void read_persistent_clock(struct timespec *ts) | 144 | void read_persistent_clock(struct timespec *ts) |
143 | { | 145 | { |
144 | unsigned long retval; | 146 | x86_platform.get_wallclock(ts); |
145 | |||
146 | retval = x86_platform.get_wallclock(); | ||
147 | |||
148 | ts->tv_sec = retval; | ||
149 | ts->tv_nsec = 0; | ||
150 | } | 147 | } |
151 | 148 | ||
152 | 149 | ||
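The wallclock hooks now pass struct timespec both ways instead of bare seconds, which lets a platform report sub-second resolution even though the CMOS RTC cannot. A userspace sketch of the new shape of the two callbacks, with obviously fake clock values:

    #include <stdio.h>
    #include <time.h>

    static void model_get_wallclock(struct timespec *now)
    {
        now->tv_sec = 1376500000;  /* illustrative epoch seconds */
        now->tv_nsec = 0;          /* CMOS RTC has whole-second resolution */
    }

    static int model_set_wallclock(const struct timespec *now)
    {
        printf("program RTC to %lld s\n", (long long)now->tv_sec);
        return 0;
    }

    int main(void)
    {
        struct timespec ts;

        model_get_wallclock(&ts);
        return model_set_wallclock(&ts);
    }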
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 56f7fcfe7fa2..f8ec57815c05 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -170,7 +170,7 @@ static struct resource bss_resource = { | |||
170 | 170 | ||
171 | #ifdef CONFIG_X86_32 | 171 | #ifdef CONFIG_X86_32 |
172 | /* cpu data as detected by the assembly code in head.S */ | 172 | /* cpu data as detected by the assembly code in head.S */ |
173 | struct cpuinfo_x86 new_cpu_data __cpuinitdata = { | 173 | struct cpuinfo_x86 new_cpu_data = { |
174 | .wp_works_ok = -1, | 174 | .wp_works_ok = -1, |
175 | }; | 175 | }; |
176 | /* common cpu data for all cpus */ | 176 | /* common cpu data for all cpus */ |
@@ -1040,8 +1040,6 @@ void __init setup_arch(char **cmdline_p) | |||
1040 | /* max_low_pfn get updated here */ | 1040 | /* max_low_pfn get updated here */ |
1041 | find_low_pfn_range(); | 1041 | find_low_pfn_range(); |
1042 | #else | 1042 | #else |
1043 | num_physpages = max_pfn; | ||
1044 | |||
1045 | check_x2apic(); | 1043 | check_x2apic(); |
1046 | 1044 | ||
1047 | /* How many end-of-memory variables you have, grandma! */ | 1045 | /* How many end-of-memory variables you have, grandma! */ |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 69562992e457..cf913587d4dd 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -43,12 +43,6 @@ | |||
43 | 43 | ||
44 | #include <asm/sigframe.h> | 44 | #include <asm/sigframe.h> |
45 | 45 | ||
46 | #ifdef CONFIG_X86_32 | ||
47 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) | ||
48 | #else | ||
49 | # define FIX_EFLAGS __FIX_EFLAGS | ||
50 | #endif | ||
51 | |||
52 | #define COPY(x) do { \ | 46 | #define COPY(x) do { \ |
53 | get_user_ex(regs->x, &sc->x); \ | 47 | get_user_ex(regs->x, &sc->x); \ |
54 | } while (0) | 48 | } while (0) |
@@ -668,15 +662,17 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) | |||
668 | if (!failed) { | 662 | if (!failed) { |
669 | /* | 663 | /* |
670 | * Clear the direction flag as per the ABI for function entry. | 664 | * Clear the direction flag as per the ABI for function entry. |
671 | */ | 665 | * |
672 | regs->flags &= ~X86_EFLAGS_DF; | 666 | * Clear RF when entering the signal handler, because |
673 | /* | 667 | * it might disable possible debug exception from the |
668 | * signal handler. | ||
669 | * | ||
674 | * Clear TF when entering the signal handler, but | 670 | * Clear TF when entering the signal handler, but |
675 | * notify any tracer that was single-stepping it. | 671 | * notify any tracer that was single-stepping it. |
676 | * The tracer may want to single-step inside the | 672 | * The tracer may want to single-step inside the |
677 | * handler too. | 673 | * handler too. |
678 | */ | 674 | */ |
679 | regs->flags &= ~X86_EFLAGS_TF; | 675 | regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); |
680 | } | 676 | } |
681 | signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP)); | 677 | signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP)); |
682 | } | 678 | } |
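The handle_signal() change folds three flag clears into one mask: DF per the function-entry ABI, RF so instruction breakpoints are not suppressed inside the handler, and TF so the handler is not itself single-stepped. A model with the flag values assumed from the x86 processor-flags header:

    #include <stdio.h>

    #define X86_EFLAGS_TF 0x00000100ul  /* single-step trap flag */
    #define X86_EFLAGS_DF 0x00000400ul  /* string-op direction flag */
    #define X86_EFLAGS_RF 0x00010000ul  /* resume flag */

    int main(void)
    {
        unsigned long flags = 0x00010502ul;  /* RF|DF|TF plus the fixed bit */

        flags &= ~(X86_EFLAGS_DF | X86_EFLAGS_RF | X86_EFLAGS_TF);
        printf("flags = 0x%08lx\n", flags);  /* 0x00000002 */
        return 0;
    }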
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 48d2b7ded422..cdaa347dfcad 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
31 | #include <asm/apic.h> | 31 | #include <asm/apic.h> |
32 | #include <asm/nmi.h> | 32 | #include <asm/nmi.h> |
33 | #include <asm/trace/irq_vectors.h> | ||
33 | /* | 34 | /* |
34 | * Some notes on x86 processor bugs affecting SMP operation: | 35 | * Some notes on x86 processor bugs affecting SMP operation: |
35 | * | 36 | * |
@@ -249,32 +250,87 @@ finish: | |||
249 | /* | 250 | /* |
250 | * Reschedule call back. | 251 | * Reschedule call back. |
251 | */ | 252 | */ |
252 | void smp_reschedule_interrupt(struct pt_regs *regs) | 253 | static inline void __smp_reschedule_interrupt(void) |
253 | { | 254 | { |
254 | ack_APIC_irq(); | ||
255 | inc_irq_stat(irq_resched_count); | 255 | inc_irq_stat(irq_resched_count); |
256 | scheduler_ipi(); | 256 | scheduler_ipi(); |
257 | } | ||
258 | |||
259 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
260 | { | ||
261 | ack_APIC_irq(); | ||
262 | __smp_reschedule_interrupt(); | ||
257 | /* | 263 | /* |
258 | * KVM uses this interrupt to force a cpu out of guest mode | 264 | * KVM uses this interrupt to force a cpu out of guest mode |
259 | */ | 265 | */ |
260 | } | 266 | } |
261 | 267 | ||
262 | void smp_call_function_interrupt(struct pt_regs *regs) | 268 | static inline void smp_entering_irq(void) |
263 | { | 269 | { |
264 | ack_APIC_irq(); | 270 | ack_APIC_irq(); |
265 | irq_enter(); | 271 | irq_enter(); |
272 | } | ||
273 | |||
274 | void smp_trace_reschedule_interrupt(struct pt_regs *regs) | ||
275 | { | ||
276 | /* | ||
277 | * Need to call irq_enter() before calling the trace point. | ||
278 | * __smp_reschedule_interrupt() calls irq_enter/exit() too (in | ||
279 | * scheduler_ipi()). This is OK, since those functions are allowed | ||
280 | * to nest. | ||
281 | */ | ||
282 | smp_entering_irq(); | ||
283 | trace_reschedule_entry(RESCHEDULE_VECTOR); | ||
284 | __smp_reschedule_interrupt(); | ||
285 | trace_reschedule_exit(RESCHEDULE_VECTOR); | ||
286 | exiting_irq(); | ||
287 | /* | ||
288 | * KVM uses this interrupt to force a cpu out of guest mode | ||
289 | */ | ||
290 | } | ||
291 | |||
292 | static inline void __smp_call_function_interrupt(void) | ||
293 | { | ||
266 | generic_smp_call_function_interrupt(); | 294 | generic_smp_call_function_interrupt(); |
267 | inc_irq_stat(irq_call_count); | 295 | inc_irq_stat(irq_call_count); |
268 | irq_exit(); | ||
269 | } | 296 | } |
270 | 297 | ||
271 | void smp_call_function_single_interrupt(struct pt_regs *regs) | 298 | void smp_call_function_interrupt(struct pt_regs *regs) |
299 | { | ||
300 | smp_entering_irq(); | ||
301 | __smp_call_function_interrupt(); | ||
302 | exiting_irq(); | ||
303 | } | ||
304 | |||
305 | void smp_trace_call_function_interrupt(struct pt_regs *regs) | ||
306 | { | ||
307 | smp_entering_irq(); | ||
308 | trace_call_function_entry(CALL_FUNCTION_VECTOR); | ||
309 | __smp_call_function_interrupt(); | ||
310 | trace_call_function_exit(CALL_FUNCTION_VECTOR); | ||
311 | exiting_irq(); | ||
312 | } | ||
313 | |||
314 | static inline void __smp_call_function_single_interrupt(void) | ||
272 | { | 315 | { |
273 | ack_APIC_irq(); | ||
274 | irq_enter(); | ||
275 | generic_smp_call_function_single_interrupt(); | 316 | generic_smp_call_function_single_interrupt(); |
276 | inc_irq_stat(irq_call_count); | 317 | inc_irq_stat(irq_call_count); |
277 | irq_exit(); | 318 | } |
319 | |||
320 | void smp_call_function_single_interrupt(struct pt_regs *regs) | ||
321 | { | ||
322 | smp_entering_irq(); | ||
323 | __smp_call_function_single_interrupt(); | ||
324 | exiting_irq(); | ||
325 | } | ||
326 | |||
327 | void smp_trace_call_function_single_interrupt(struct pt_regs *regs) | ||
328 | { | ||
329 | smp_entering_irq(); | ||
330 | trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); | ||
331 | __smp_call_function_single_interrupt(); | ||
332 | trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR); | ||
333 | exiting_irq(); | ||
278 | } | 334 | } |
279 | 335 | ||
280 | static int __init nonmi_ipi_setup(char *str) | 336 | static int __init nonmi_ipi_setup(char *str) |
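
The smp.c rework establishes the pattern this merge applies to every IPI handler: the body moves into a static inline helper, the existing entry point wraps it in smp_entering_irq()/exiting_irq(), and a parallel smp_trace_* entry point additionally brackets the body with vector entry/exit tracepoints. Only the trace IDT (see tracepoint.c further down) dispatches to the trace variants, so the tracepoints cost nothing while tracing is off. Note that smp_reschedule_interrupt() keeps its bare ack_APIC_irq() form because scheduler_ipi() does its own irq_enter()/irq_exit(); the traced variant adds the full bracket, which is safe since those calls nest. A compilable userspace sketch of the shape, with the kernel hooks stubbed out (all names here are placeholders):

#include <stdio.h>

static void entering_irq(void) { puts("ack + irq_enter"); }
static void exiting_irq(void)  { puts("irq_exit"); }
static void trace_entry(int v) { printf("entry tracepoint, vector 0x%x\n", v); }
static void trace_exit(int v)  { printf("exit tracepoint, vector 0x%x\n", v); }

static void body(void) { puts("real work, shared by both entry points"); }

static void handler(void)
{
	entering_irq();
	body();
	exiting_irq();
}

static void trace_handler(int vector)
{
	entering_irq();
	trace_entry(vector);
	body();
	trace_exit(vector);
	exiting_irq();
}

int main(void)
{
	handler();
	trace_handler(0xfd);	/* 0xfd is RESCHEDULE_VECTOR on x86 */
	return 0;
}
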
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index bfd348e99369..aecc98a93d1b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -130,7 +130,7 @@ atomic_t init_deasserted; | |||
130 | * Report back to the Boot Processor during boot time or to the caller processor | 130 | * Report back to the Boot Processor during boot time or to the caller processor |
131 | * during CPU online. | 131 | * during CPU online. |
132 | */ | 132 | */ |
133 | static void __cpuinit smp_callin(void) | 133 | static void smp_callin(void) |
134 | { | 134 | { |
135 | int cpuid, phys_id; | 135 | int cpuid, phys_id; |
136 | unsigned long timeout; | 136 | unsigned long timeout; |
@@ -237,7 +237,7 @@ static int enable_start_cpu0; | |||
237 | /* | 237 | /* |
238 | * Activate a secondary processor. | 238 | * Activate a secondary processor. |
239 | */ | 239 | */ |
240 | notrace static void __cpuinit start_secondary(void *unused) | 240 | static void notrace start_secondary(void *unused) |
241 | { | 241 | { |
242 | /* | 242 | /* |
243 | * Don't put *anything* before cpu_init(), SMP booting is too | 243 | * Don't put *anything* before cpu_init(), SMP booting is too |
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void) | |||
300 | * The bootstrap kernel entry code has set these up. Save them for | 300 | * The bootstrap kernel entry code has set these up. Save them for |
301 | * a given CPU | 301 | * a given CPU |
302 | */ | 302 | */ |
303 | void __cpuinit smp_store_cpu_info(int id) | 303 | void smp_store_cpu_info(int id) |
304 | { | 304 | { |
305 | struct cpuinfo_x86 *c = &cpu_data(id); | 305 | struct cpuinfo_x86 *c = &cpu_data(id); |
306 | 306 | ||
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
313 | identify_secondary_cpu(c); | 313 | identify_secondary_cpu(c); |
314 | } | 314 | } |
315 | 315 | ||
316 | static bool __cpuinit | 316 | static bool |
317 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) | 317 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
318 | { | 318 | { |
319 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 319 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
@@ -330,7 +330,7 @@ do { \ | |||
330 | cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ | 330 | cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ |
331 | } while (0) | 331 | } while (0) |
332 | 332 | ||
333 | static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 333 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
334 | { | 334 | { |
335 | if (cpu_has_topoext) { | 335 | if (cpu_has_topoext) { |
336 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 336 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
348 | return false; | 348 | return false; |
349 | } | 349 | } |
350 | 350 | ||
351 | static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 351 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
352 | { | 352 | { |
353 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 353 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
354 | 354 | ||
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
359 | return false; | 359 | return false; |
360 | } | 360 | } |
361 | 361 | ||
362 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 362 | static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
363 | { | 363 | { |
364 | if (c->phys_proc_id == o->phys_proc_id) { | 364 | if (c->phys_proc_id == o->phys_proc_id) { |
365 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) | 365 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) |
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
370 | return false; | 370 | return false; |
371 | } | 371 | } |
372 | 372 | ||
373 | void __cpuinit set_cpu_sibling_map(int cpu) | 373 | void set_cpu_sibling_map(int cpu) |
374 | { | 374 | { |
375 | bool has_smt = smp_num_siblings > 1; | 375 | bool has_smt = smp_num_siblings > 1; |
376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; | 376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; |
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid) | |||
499 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | 499 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this |
500 | * won't ... remember to clear down the APIC, etc later. | 500 | * won't ... remember to clear down the APIC, etc later. |
501 | */ | 501 | */ |
502 | int __cpuinit | 502 | int |
503 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) | 503 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) |
504 | { | 504 | { |
505 | unsigned long send_status, accept_status = 0; | 505 | unsigned long send_status, accept_status = 0; |
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) | |||
533 | return (send_status | accept_status); | 533 | return (send_status | accept_status); |
534 | } | 534 | } |
535 | 535 | ||
536 | static int __cpuinit | 536 | static int |
537 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | 537 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) |
538 | { | 538 | { |
539 | unsigned long send_status, accept_status = 0; | 539 | unsigned long send_status, accept_status = 0; |
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | |||
649 | } | 649 | } |
650 | 650 | ||
651 | /* reduce the number of lines printed when booting a large cpu count system */ | 651 | /* reduce the number of lines printed when booting a large cpu count system */ |
652 | static void __cpuinit announce_cpu(int cpu, int apicid) | 652 | static void announce_cpu(int cpu, int apicid) |
653 | { | 653 | { |
654 | static int current_node = -1; | 654 | static int current_node = -1; |
655 | int node = early_cpu_to_node(cpu); | 655 | int node = early_cpu_to_node(cpu); |
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs) | |||
691 | * We'll change this code in the future to wake up hard offlined CPU0 if | 691 | * We'll change this code in the future to wake up hard offlined CPU0 if |
692 | * real platform and request are available. | 692 | * real platform and request are available. |
693 | */ | 693 | */ |
694 | static int __cpuinit | 694 | static int |
695 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, | 695 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, |
696 | int *cpu0_nmi_registered) | 696 | int *cpu0_nmi_registered) |
697 | { | 697 | { |
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, | |||
731 | * Returns zero if CPU booted OK, else error code from | 731 | * Returns zero if CPU booted OK, else error code from |
732 | * ->wakeup_secondary_cpu. | 732 | * ->wakeup_secondary_cpu. |
733 | */ | 733 | */ |
734 | static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) | 734 | static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) |
735 | { | 735 | { |
736 | volatile u32 *trampoline_status = | 736 | volatile u32 *trampoline_status = |
737 | (volatile u32 *) __va(real_mode_header->trampoline_status); | 737 | (volatile u32 *) __va(real_mode_header->trampoline_status); |
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) | |||
872 | return boot_error; | 872 | return boot_error; |
873 | } | 873 | } |
874 | 874 | ||
875 | int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) | 875 | int native_cpu_up(unsigned int cpu, struct task_struct *tidle) |
876 | { | 876 | { |
877 | int apicid = apic->cpu_present_to_apicid(cpu); | 877 | int apicid = apic->cpu_present_to_apicid(cpu); |
878 | unsigned long flags; | 878 | unsigned long flags; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..48f8375e4c6b 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
101 | *begin = new_begin; | 101 | *begin = new_begin; |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | *begin = TASK_UNMAPPED_BASE; | 104 | *begin = mmap_legacy_base(); |
105 | *end = TASK_SIZE; | 105 | *end = TASK_SIZE; |
106 | } | 106 | } |
107 | } | 107 | } |
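
Replacing the raw TASK_UNMAPPED_BASE constant with a call into the mm code lets the legacy (bottom-up) mmap base participate in address-space randomization on 64-bit. A hedged sketch of what the helper added in arch/x86/mm/mmap.c looks like; mmap_rnd() is that file's existing randomization helper, and the exact body here is an assumption:

/* Sketch, not a quotation of arch/x86/mm/mmap.c. */
unsigned long mmap_legacy_base(void)
{
	if (mmap_is_ia32())
		return TASK_UNMAPPED_BASE;		/* 32-bit keeps the fixed base */
	return TASK_UNMAPPED_BASE + mmap_rnd();		/* 64-bit adds the ASLR offset */
}
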
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index f84fe00fad48..addf7b58f4e8 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/pfn.h> | 31 | #include <linux/pfn.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/tboot.h> | 33 | #include <linux/tboot.h> |
34 | #include <linux/debugfs.h> | ||
34 | 35 | ||
35 | #include <asm/realmode.h> | 36 | #include <asm/realmode.h> |
36 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
@@ -319,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps) | |||
319 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); | 320 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); |
320 | } | 321 | } |
321 | 322 | ||
322 | static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, | 323 | static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, |
323 | unsigned long action, void *hcpu) | 324 | void *hcpu) |
324 | { | 325 | { |
325 | switch (action) { | 326 | switch (action) { |
326 | case CPU_DYING: | 327 | case CPU_DYING: |
@@ -333,11 +334,78 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, | |||
333 | return NOTIFY_OK; | 334 | return NOTIFY_OK; |
334 | } | 335 | } |
335 | 336 | ||
336 | static struct notifier_block tboot_cpu_notifier __cpuinitdata = | 337 | static struct notifier_block tboot_cpu_notifier = |
337 | { | 338 | { |
338 | .notifier_call = tboot_cpu_callback, | 339 | .notifier_call = tboot_cpu_callback, |
339 | }; | 340 | }; |
340 | 341 | ||
342 | #ifdef CONFIG_DEBUG_FS | ||
343 | |||
344 | #define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \ | ||
345 | 0x4c, 0x84, 0xa3, 0xe9, 0x53, 0xb8, 0x81, 0x74 } | ||
346 | |||
347 | #define TBOOT_SERIAL_LOG_ADDR 0x60000 | ||
348 | #define TBOOT_SERIAL_LOG_SIZE 0x08000 | ||
349 | #define LOG_MAX_SIZE_OFF 16 | ||
350 | #define LOG_BUF_OFF 24 | ||
351 | |||
352 | static uint8_t tboot_log_uuid[16] = TBOOT_LOG_UUID; | ||
353 | |||
354 | static ssize_t tboot_log_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) | ||
355 | { | ||
356 | void __iomem *log_base; | ||
357 | u8 log_uuid[16]; | ||
358 | u32 max_size; | ||
359 | void *kbuf; | ||
360 | int ret = -EFAULT; | ||
361 | |||
362 | log_base = ioremap_nocache(TBOOT_SERIAL_LOG_ADDR, TBOOT_SERIAL_LOG_SIZE); | ||
363 | if (!log_base) | ||
364 | return ret; | ||
365 | |||
366 | memcpy_fromio(log_uuid, log_base, sizeof(log_uuid)); | ||
367 | if (memcmp(&tboot_log_uuid, log_uuid, sizeof(log_uuid))) | ||
368 | goto err_iounmap; | ||
369 | |||
370 | max_size = readl(log_base + LOG_MAX_SIZE_OFF); | ||
371 | if (*ppos >= max_size) { | ||
372 | ret = 0; | ||
373 | goto err_iounmap; | ||
374 | } | ||
375 | |||
376 | if (*ppos + count > max_size) | ||
377 | count = max_size - *ppos; | ||
378 | |||
379 | kbuf = kmalloc(count, GFP_KERNEL); | ||
380 | if (!kbuf) { | ||
381 | ret = -ENOMEM; | ||
382 | goto err_iounmap; | ||
383 | } | ||
384 | |||
385 | memcpy_fromio(kbuf, log_base + LOG_BUF_OFF + *ppos, count); | ||
386 | if (copy_to_user(user_buf, kbuf, count)) | ||
387 | goto err_kfree; | ||
388 | |||
389 | *ppos += count; | ||
390 | |||
391 | ret = count; | ||
392 | |||
393 | err_kfree: | ||
394 | kfree(kbuf); | ||
395 | |||
396 | err_iounmap: | ||
397 | iounmap(log_base); | ||
398 | |||
399 | return ret; | ||
400 | } | ||
401 | |||
402 | static const struct file_operations tboot_log_fops = { | ||
403 | .read = tboot_log_read, | ||
404 | .llseek = default_llseek, | ||
405 | }; | ||
406 | |||
407 | #endif /* CONFIG_DEBUG_FS */ | ||
408 | |||
341 | static __init int tboot_late_init(void) | 409 | static __init int tboot_late_init(void) |
342 | { | 410 | { |
343 | if (!tboot_enabled()) | 411 | if (!tboot_enabled()) |
@@ -348,6 +416,11 @@ static __init int tboot_late_init(void) | |||
348 | atomic_set(&ap_wfs_count, 0); | 416 | atomic_set(&ap_wfs_count, 0); |
349 | register_hotcpu_notifier(&tboot_cpu_notifier); | 417 | register_hotcpu_notifier(&tboot_cpu_notifier); |
350 | 418 | ||
419 | #ifdef CONFIG_DEBUG_FS | ||
420 | debugfs_create_file("tboot_log", S_IRUSR, | ||
421 | arch_debugfs_dir, NULL, &tboot_log_fops); | ||
422 | #endif | ||
423 | |||
351 | acpi_os_set_prepare_sleep(&tboot_sleep); | 424 | acpi_os_set_prepare_sleep(&tboot_sleep); |
352 | return 0; | 425 | return 0; |
353 | } | 426 | } |
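
The tboot hunk adds a read-only debugfs view of the tboot serial log: the reader maps the fixed physical window at 0x60000, validates the UUID header, bounds the read against the log's max-size field, and bounce-buffers through kmalloc() because copy_to_user() cannot take an __iomem pointer. Since arch_debugfs_dir is the "x86" directory, the file should surface as /sys/kernel/debug/x86/tboot_log on the usual debugfs mount (the mount point is an assumption). A minimal userspace reader:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumes debugfs at /sys/kernel/debug; mode is S_IRUSR, so run as root. */
	int fd = open("/sys/kernel/debug/x86/tboot_log", O_RDONLY);
	char buf[4096];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
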
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c new file mode 100644 index 000000000000..1c113db9ed57 --- /dev/null +++ b/arch/x86/kernel/tracepoint.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Code for supporting irq vector tracepoints. | ||
3 | * | ||
4 | * Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com> | ||
5 | * | ||
6 | */ | ||
7 | #include <asm/hw_irq.h> | ||
8 | #include <asm/desc.h> | ||
9 | #include <linux/atomic.h> | ||
10 | |||
11 | atomic_t trace_idt_ctr = ATOMIC_INIT(0); | ||
12 | struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, | ||
13 | (unsigned long) trace_idt_table }; | ||
14 | |||
15 | /* No need to be aligned, but done to keep all IDTs defined the same way. */ | ||
16 | gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss; | ||
17 | |||
18 | static int trace_irq_vector_refcount; | ||
19 | static DEFINE_MUTEX(irq_vector_mutex); | ||
20 | |||
21 | static void set_trace_idt_ctr(int val) | ||
22 | { | ||
23 | atomic_set(&trace_idt_ctr, val); | ||
24 | /* Ensure the trace_idt_ctr is set before sending IPI */ | ||
25 | wmb(); | ||
26 | } | ||
27 | |||
28 | static void switch_idt(void *arg) | ||
29 | { | ||
30 | unsigned long flags; | ||
31 | |||
32 | local_irq_save(flags); | ||
33 | load_current_idt(); | ||
34 | local_irq_restore(flags); | ||
35 | } | ||
36 | |||
37 | void trace_irq_vector_regfunc(void) | ||
38 | { | ||
39 | mutex_lock(&irq_vector_mutex); | ||
40 | if (!trace_irq_vector_refcount) { | ||
41 | set_trace_idt_ctr(1); | ||
42 | smp_call_function(switch_idt, NULL, 0); | ||
43 | switch_idt(NULL); | ||
44 | } | ||
45 | trace_irq_vector_refcount++; | ||
46 | mutex_unlock(&irq_vector_mutex); | ||
47 | } | ||
48 | |||
49 | void trace_irq_vector_unregfunc(void) | ||
50 | { | ||
51 | mutex_lock(&irq_vector_mutex); | ||
52 | trace_irq_vector_refcount--; | ||
53 | if (!trace_irq_vector_refcount) { | ||
54 | set_trace_idt_ctr(0); | ||
55 | smp_call_function(switch_idt, NULL, 0); | ||
56 | switch_idt(NULL); | ||
57 | } | ||
58 | mutex_unlock(&irq_vector_mutex); | ||
59 | } | ||
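
tracepoint.c supplies the registration hooks for the new vector tracepoints: the first registered probe switches every CPU to trace_idt_table (an IPI via smp_call_function() for the others, plus a local switch_idt() for the current CPU), and the last unregistration switches back; the wmb() in set_trace_idt_ctr() ensures remote CPUs observe the new counter before acting on the IPI. Tracepoints pick these hooks up through the *_FN event macros; a sketch of the shape used by <asm/trace/irq_vectors.h> (close to, but not guaranteed to be, the exact upstream text):

DECLARE_EVENT_CLASS(x86_irq_vector,
	TP_PROTO(int vector),
	TP_ARGS(vector),
	TP_STRUCT__entry(__field(int, vector)),
	TP_fast_assign(__entry->vector = vector;),
	TP_printk("vector=%d", __entry->vector)
);

#define DEFINE_IRQ_VECTOR_EVENT(name)				\
DEFINE_EVENT_FN(x86_irq_vector, name##_entry,			\
	TP_PROTO(int vector), TP_ARGS(vector),			\
	trace_irq_vector_regfunc, trace_irq_vector_unregfunc);	\
DEFINE_EVENT_FN(x86_irq_vector, name##_exit,			\
	TP_PROTO(int vector), TP_ARGS(vector),			\
	trace_irq_vector_regfunc, trace_irq_vector_unregfunc);

DEFINE_IRQ_VECTOR_EVENT(reschedule);
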
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 772e2a846dec..1b23a1c92746 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -63,19 +63,19 @@ | |||
63 | #include <asm/x86_init.h> | 63 | #include <asm/x86_init.h> |
64 | #include <asm/pgalloc.h> | 64 | #include <asm/pgalloc.h> |
65 | #include <asm/proto.h> | 65 | #include <asm/proto.h> |
66 | |||
67 | /* No need to be aligned, but done to keep all IDTs defined the same way. */ | ||
68 | gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; | ||
66 | #else | 69 | #else |
67 | #include <asm/processor-flags.h> | 70 | #include <asm/processor-flags.h> |
68 | #include <asm/setup.h> | 71 | #include <asm/setup.h> |
69 | 72 | ||
70 | asmlinkage int system_call(void); | 73 | asmlinkage int system_call(void); |
71 | |||
72 | /* | ||
73 | * The IDT has to be page-aligned to simplify the Pentium | ||
74 | * F0 0F bug workaround. | ||
75 | */ | ||
76 | gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; | ||
77 | #endif | 74 | #endif |
78 | 75 | ||
76 | /* Must be page-aligned because the real IDT is used in a fixmap. */ | ||
77 | gate_desc idt_table[NR_VECTORS] __page_aligned_bss; | ||
78 | |||
79 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | 79 | DECLARE_BITMAP(used_vectors, NR_VECTORS); |
80 | EXPORT_SYMBOL_GPL(used_vectors); | 80 | EXPORT_SYMBOL_GPL(used_vectors); |
81 | 81 | ||
@@ -254,6 +254,9 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) | |||
254 | tsk->thread.error_code = error_code; | 254 | tsk->thread.error_code = error_code; |
255 | tsk->thread.trap_nr = X86_TRAP_DF; | 255 | tsk->thread.trap_nr = X86_TRAP_DF; |
256 | 256 | ||
257 | #ifdef CONFIG_DOUBLEFAULT | ||
258 | df_debug(regs, error_code); | ||
259 | #endif | ||
257 | /* | 260 | /* |
258 | * This is always a kernel trap and never fixable (and thus must | 261 | * This is always a kernel trap and never fixable (and thus must |
259 | * never return). | 262 | * never return). |
@@ -437,7 +440,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
437 | /* Store the virtualized DR6 value */ | 440 | /* Store the virtualized DR6 value */ |
438 | tsk->thread.debugreg6 = dr6; | 441 | tsk->thread.debugreg6 = dr6; |
439 | 442 | ||
440 | if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, | 443 | if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, |
441 | SIGTRAP) == NOTIFY_STOP) | 444 | SIGTRAP) == NOTIFY_STOP) |
442 | goto exit; | 445 | goto exit; |
443 | 446 | ||
@@ -785,7 +788,7 @@ void __init trap_init(void) | |||
785 | x86_init.irqs.trap_init(); | 788 | x86_init.irqs.trap_init(); |
786 | 789 | ||
787 | #ifdef CONFIG_X86_64 | 790 | #ifdef CONFIG_X86_64 |
788 | memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); | 791 | memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); |
789 | set_nmi_gate(X86_TRAP_DB, &debug); | 792 | set_nmi_gate(X86_TRAP_DB, &debug); |
790 | set_nmi_gate(X86_TRAP_BP, &int3); | 793 | set_nmi_gate(X86_TRAP_BP, &int3); |
791 | #endif | 794 | #endif |
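
Two small fixes ride along in traps.c besides the IDT reshuffle: do_double_fault() now calls the CONFIG_DOUBLEFAULT debug helper (matching the doublefault_32.o to doublefault.o rename in the Makefile), and do_debug() passes the dr6 pointer to notify_die() as a plain (long)&dr6 instead of PTR_ERR(&dr6). The generated code is the same, since PTR_ERR() is just a cast, but the old form misread as an errno conversion. Sketch of the <linux/err.h> helper (modulo annotations such as __must_check):

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}
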
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 098b3cfda72e..6ff49247edf8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void) | |||
824 | * Make an educated guess if the TSC is trustworthy and synchronized | 824 | * Make an educated guess if the TSC is trustworthy and synchronized |
825 | * over all CPUs. | 825 | * over all CPUs. |
826 | */ | 826 | */ |
827 | __cpuinit int unsynchronized_tsc(void) | 827 | int unsynchronized_tsc(void) |
828 | { | 828 | { |
829 | if (!cpu_has_tsc || tsc_unstable) | 829 | if (!cpu_has_tsc || tsc_unstable) |
830 | return 1; | 830 | return 1; |
@@ -1020,7 +1020,7 @@ void __init tsc_init(void) | |||
1020 | * been calibrated. This assumes that CONSTANT_TSC applies to all | 1020 | * been calibrated. This assumes that CONSTANT_TSC applies to all |
1021 | * cpus in the socket - this should be a safe assumption. | 1021 | * cpus in the socket - this should be a safe assumption. |
1022 | */ | 1022 | */ |
1023 | unsigned long __cpuinit calibrate_delay_is_known(void) | 1023 | unsigned long calibrate_delay_is_known(void) |
1024 | { | 1024 | { |
1025 | int i, cpu = smp_processor_id(); | 1025 | int i, cpu = smp_processor_id(); |
1026 | 1026 | ||
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index fc25e60a5884..adfdf56a3714 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -25,24 +25,24 @@ | |||
25 | * Entry/exit counters that make sure that both CPUs | 25 | * Entry/exit counters that make sure that both CPUs |
26 | * run the measurement code at once: | 26 | * run the measurement code at once: |
27 | */ | 27 | */ |
28 | static __cpuinitdata atomic_t start_count; | 28 | static atomic_t start_count; |
29 | static __cpuinitdata atomic_t stop_count; | 29 | static atomic_t stop_count; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * We use a raw spinlock in this exceptional case, because | 32 | * We use a raw spinlock in this exceptional case, because |
33 | * we want to have the fastest, inlined, non-debug version | 33 | * we want to have the fastest, inlined, non-debug version |
34 | * of a critical section, to be able to prove TSC time-warps: | 34 | * of a critical section, to be able to prove TSC time-warps: |
35 | */ | 35 | */ |
36 | static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 36 | static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
37 | 37 | ||
38 | static __cpuinitdata cycles_t last_tsc; | 38 | static cycles_t last_tsc; |
39 | static __cpuinitdata cycles_t max_warp; | 39 | static cycles_t max_warp; |
40 | static __cpuinitdata int nr_warps; | 40 | static int nr_warps; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * TSC-warp measurement loop running on both CPUs: | 43 | * TSC-warp measurement loop running on both CPUs: |
44 | */ | 44 | */ |
45 | static __cpuinit void check_tsc_warp(unsigned int timeout) | 45 | static void check_tsc_warp(unsigned int timeout) |
46 | { | 46 | { |
47 | cycles_t start, now, prev, end; | 47 | cycles_t start, now, prev, end; |
48 | int i; | 48 | int i; |
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu) | |||
121 | * Source CPU calls into this - it waits for the freshly booted | 121 | * Source CPU calls into this - it waits for the freshly booted |
122 | * target CPU to arrive and then starts the measurement: | 122 | * target CPU to arrive and then starts the measurement: |
123 | */ | 123 | */ |
124 | void __cpuinit check_tsc_sync_source(int cpu) | 124 | void check_tsc_sync_source(int cpu) |
125 | { | 125 | { |
126 | int cpus = 2; | 126 | int cpus = 2; |
127 | 127 | ||
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
187 | /* | 187 | /* |
188 | * Freshly booted CPUs call into this: | 188 | * Freshly booted CPUs call into this: |
189 | */ | 189 | */ |
190 | void __cpuinit check_tsc_sync_target(void) | 190 | void check_tsc_sync_target(void) |
191 | { | 191 | { |
192 | int cpus = 2; | 192 | int cpus = 2; |
193 | 193 | ||
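
tsc_sync.c only loses annotations here, but the machinery deserves a gloss: the source and target CPUs rendezvous through the start_count/stop_count atomics, then both hammer the TSC under the raw sync_lock, recording any observation of time running backwards in max_warp/nr_warps. The rendezvous idiom itself is easy to show in userspace (an illustration of the counter handshake only, not the kernel's lockless details):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int start_count;	/* each side increments, then spins until both arrive */

static void *side(void *arg)
{
	atomic_fetch_add(&start_count, 1);
	while (atomic_load(&start_count) != 2)
		;	/* busy-wait so both sides enter the measurement together */
	printf("side %ld: measuring\n", (long) arg);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, side, (void *) 1L);
	side((void *) 0L);
	pthread_join(t, NULL);
	return 0;
}
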
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 9a907a67be8f..1f96f9347ed9 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -331,7 +331,7 @@ sigsegv: | |||
331 | * Assume __initcall executes before all user space. Hopefully kmod | 331 | * Assume __initcall executes before all user space. Hopefully kmod |
332 | * doesn't violate that. We'll find out if it does. | 332 | * doesn't violate that. We'll find out if it does. |
333 | */ | 333 | */ |
334 | static void __cpuinit vsyscall_set_cpu(int cpu) | 334 | static void vsyscall_set_cpu(int cpu) |
335 | { | 335 | { |
336 | unsigned long d; | 336 | unsigned long d; |
337 | unsigned long node = 0; | 337 | unsigned long node = 0; |
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu) | |||
353 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); | 353 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __cpuinit cpu_vsyscall_init(void *arg) | 356 | static void cpu_vsyscall_init(void *arg) |
357 | { | 357 | { |
358 | /* preemption should be already off */ | 358 | /* preemption should be already off */ |
359 | vsyscall_set_cpu(raw_smp_processor_id()); | 359 | vsyscall_set_cpu(raw_smp_processor_id()); |
360 | } | 360 | } |
361 | 361 | ||
362 | static int __cpuinit | 362 | static int |
363 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | 363 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) |
364 | { | 364 | { |
365 | long cpu = (long)arg; | 365 | long cpu = (long)arg; |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 45a14dbbddaf..5f24c71accaa 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
26 | #include <asm/mach_traps.h> | 26 | #include <asm/mach_traps.h> |
27 | 27 | ||
28 | void __cpuinit x86_init_noop(void) { } | 28 | void x86_init_noop(void) { } |
29 | void __init x86_init_uint_noop(unsigned int unused) { } | 29 | void __init x86_init_uint_noop(unsigned int unused) { } |
30 | int __init iommu_init_noop(void) { return 0; } | 30 | int __init iommu_init_noop(void) { return 0; } |
31 | void iommu_shutdown_noop(void) { } | 31 | void iommu_shutdown_noop(void) { } |
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = { | |||
85 | }, | 85 | }, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | 88 | struct x86_cpuinit_ops x86_cpuinit = { |
89 | .early_percpu_clock_init = x86_init_noop, | 89 | .early_percpu_clock_init = x86_init_noop, |
90 | .setup_percpu_clockev = setup_secondary_APIC_clock, | 90 | .setup_percpu_clockev = setup_secondary_APIC_clock, |
91 | }; | 91 | }; |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index ada87a329edc..422fd8223470 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -243,7 +243,7 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
243 | if (!access_ok(VERIFY_WRITE, buf, size)) | 243 | if (!access_ok(VERIFY_WRITE, buf, size)) |
244 | return -EACCES; | 244 | return -EACCES; |
245 | 245 | ||
246 | if (!HAVE_HWFP) | 246 | if (!static_cpu_has(X86_FEATURE_FPU)) |
247 | return fpregs_soft_get(current, NULL, 0, | 247 | return fpregs_soft_get(current, NULL, 0, |
248 | sizeof(struct user_i387_ia32_struct), NULL, | 248 | sizeof(struct user_i387_ia32_struct), NULL, |
249 | (struct _fpstate_ia32 __user *) buf) ? -1 : 1; | 249 | (struct _fpstate_ia32 __user *) buf) ? -1 : 1; |
@@ -350,11 +350,10 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
350 | if (!used_math() && init_fpu(tsk)) | 350 | if (!used_math() && init_fpu(tsk)) |
351 | return -1; | 351 | return -1; |
352 | 352 | ||
353 | if (!HAVE_HWFP) { | 353 | if (!static_cpu_has(X86_FEATURE_FPU)) |
354 | return fpregs_soft_set(current, NULL, | 354 | return fpregs_soft_set(current, NULL, |
355 | 0, sizeof(struct user_i387_ia32_struct), | 355 | 0, sizeof(struct user_i387_ia32_struct), |
356 | NULL, buf) != 0; | 356 | NULL, buf) != 0; |
357 | } | ||
358 | 357 | ||
359 | if (use_xsave()) { | 358 | if (use_xsave()) { |
360 | struct _fpx_sw_bytes fx_sw_user; | 359 | struct _fpx_sw_bytes fx_sw_user; |
@@ -574,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void) | |||
574 | * This is somewhat obfuscated due to the lack of powerful enough | 573 | * This is somewhat obfuscated due to the lack of powerful enough |
575 | * overrides for the section checks. | 574 | * overrides for the section checks. |
576 | */ | 575 | */ |
577 | void __cpuinit xsave_init(void) | 576 | void xsave_init(void) |
578 | { | 577 | { |
579 | static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; | 578 | static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; |
580 | void (*this_func)(void); | 579 | void (*this_func)(void); |
@@ -595,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void) | |||
595 | setup_init_fpu_buf(); | 594 | setup_init_fpu_buf(); |
596 | } | 595 | } |
597 | 596 | ||
598 | void __cpuinit eager_fpu_init(void) | 597 | void eager_fpu_init(void) |
599 | { | 598 | { |
600 | static __refdata void (*boot_func)(void) = eager_fpu_init_bp; | 599 | static __refdata void (*boot_func)(void) = eager_fpu_init_bp; |
601 | 600 | ||
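
Finally, the xsave.c hunks swap the HAVE_HWFP macro for static_cpu_has(X86_FEATURE_FPU). Unlike boot_cpu_has(), static_cpu_has() is patched by the alternatives machinery once feature bits are final, so the no-FPU check on these signal paths reduces to a fixed branch at runtime. A hedged call-site sketch; the helper names are hypothetical, only X86_FEATURE_FPU and static_cpu_has() are real:

static int copy_fpstate(void __user *buf)
{
	if (!static_cpu_has(X86_FEATURE_FPU))	/* patched to a static branch */
		return soft_fpu_path(buf);	/* hypothetical math-emu path */
	return hw_fpu_path(buf);		/* hypothetical hardware path */
}
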