author		Len Brown <len.brown@intel.com>	2011-03-23 02:34:54 -0400
committer	Len Brown <len.brown@intel.com>	2011-03-23 02:34:54 -0400
commit		02e2407858fd62053bf60349c0e72cd1c7a4a60e (patch)
tree		0ebdbddc97d3abbc675916010e7771065b70c137 /arch/x86/kernel
parent		96e1c408ea8a556c5b51e0e7d56bd2afbfbf5fe9 (diff)
parent		6447f55da90b77faec1697d499ed7986bb4f6de6 (diff)

Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/acpi/sleep.c

Signed-off-by: Len Brown <len.brown@intel.com>

Diffstat (limited to 'arch/x86/kernel')
 98 files changed, 2548 insertions, 1508 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 34244b2cd880..7338ef2218bc 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,13 +41,13 @@ obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | |||
41 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | 41 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o |
42 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o | 42 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o |
43 | obj-y += bootflag.o e820.o | 43 | obj-y += bootflag.o e820.o |
44 | obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o | 44 | obj-y += pci-dma.o quirks.o topology.o kdebugfs.o |
45 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o | 45 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o |
46 | obj-y += tsc.o io_delay.o rtc.o | 46 | obj-y += tsc.o io_delay.o rtc.o |
47 | obj-y += pci-iommu_table.o | 47 | obj-y += pci-iommu_table.o |
48 | obj-y += resource.o | 48 | obj-y += resource.o |
49 | 49 | ||
50 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | 50 | obj-y += trampoline.o trampoline_$(BITS).o |
51 | obj-y += process.o | 51 | obj-y += process.o |
52 | obj-y += i387.o xsave.o | 52 | obj-y += i387.o xsave.o |
53 | obj-y += ptrace.o | 53 | obj-y += ptrace.o |
@@ -55,10 +55,12 @@ obj-$(CONFIG_X86_32) += tls.o | |||
55 | obj-$(CONFIG_IA32_EMULATION) += tls.o | 55 | obj-$(CONFIG_IA32_EMULATION) += tls.o |
56 | obj-y += step.o | 56 | obj-y += step.o |
57 | obj-$(CONFIG_INTEL_TXT) += tboot.o | 57 | obj-$(CONFIG_INTEL_TXT) += tboot.o |
58 | obj-$(CONFIG_ISA_DMA_API) += i8237.o | ||
58 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 59 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
59 | obj-y += cpu/ | 60 | obj-y += cpu/ |
60 | obj-y += acpi/ | 61 | obj-y += acpi/ |
61 | obj-y += reboot.o | 62 | obj-y += reboot.o |
63 | obj-$(CONFIG_X86_32) += reboot_32.o | ||
62 | obj-$(CONFIG_MCA) += mca_32.o | 64 | obj-$(CONFIG_MCA) += mca_32.o |
63 | obj-$(CONFIG_X86_MSR) += msr.o | 65 | obj-$(CONFIG_X86_MSR) += msr.o |
64 | obj-$(CONFIG_X86_CPUID) += cpuid.o | 66 | obj-$(CONFIG_X86_CPUID) += cpuid.o |
@@ -66,10 +68,9 @@ obj-$(CONFIG_PCI) += early-quirks.o | |||
66 | apm-y := apm_32.o | 68 | apm-y := apm_32.o |
67 | obj-$(CONFIG_APM) += apm.o | 69 | obj-$(CONFIG_APM) += apm.o |
68 | obj-$(CONFIG_SMP) += smp.o | 70 | obj-$(CONFIG_SMP) += smp.o |
69 | obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o | 71 | obj-$(CONFIG_SMP) += smpboot.o |
72 | obj-$(CONFIG_SMP) += tsc_sync.o | ||
70 | obj-$(CONFIG_SMP) += setup_percpu.o | 73 | obj-$(CONFIG_SMP) += setup_percpu.o |
71 | obj-$(CONFIG_X86_64_SMP) += tsc_sync.o | ||
72 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | ||
73 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o | 74 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o |
74 | obj-y += apic/ | 75 | obj-y += apic/ |
75 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 76 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
@@ -109,6 +110,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
109 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | 110 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o |
110 | 111 | ||
111 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 112 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
113 | obj-$(CONFIG_OF) += devicetree.o | ||
112 | 114 | ||
113 | ### | 115 | ### |
114 | # 64 bit specific files | 116 | # 64 bit specific files |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3e6e2d68f761..9a966c579af5 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -595,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | |||
595 | nid = acpi_get_node(handle); | 595 | nid = acpi_get_node(handle); |
596 | if (nid == -1 || !node_online(nid)) | 596 | if (nid == -1 || !node_online(nid)) |
597 | return; | 597 | return; |
598 | #ifdef CONFIG_X86_64 | 598 | set_apicid_to_node(physid, nid); |
599 | apicid_to_node[physid] = nid; | ||
600 | numa_set_node(cpu, nid); | 599 | numa_set_node(cpu, nid); |
601 | #else /* CONFIG_X86_32 */ | ||
602 | apicid_2_node[physid] = nid; | ||
603 | cpu_to_node_map[cpu] = nid; | ||
604 | #endif | ||
605 | |||
606 | #endif | 600 | #endif |
607 | } | 601 | } |
608 | 602 | ||
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index 28595d6df47c..ead21b663117 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -6,11 +6,17 @@ | |||
6 | #include <asm/page_types.h> | 6 | #include <asm/page_types.h> |
7 | #include <asm/pgtable_types.h> | 7 | #include <asm/pgtable_types.h> |
8 | #include <asm/processor-flags.h> | 8 | #include <asm/processor-flags.h> |
9 | #include "wakeup.h" | ||
9 | 10 | ||
10 | .code16 | 11 | .code16 |
11 | .section ".header", "a" | 12 | .section ".jump", "ax" |
13 | .globl _start | ||
14 | _start: | ||
15 | cli | ||
16 | jmp wakeup_code | ||
12 | 17 | ||
13 | /* This should match the structure in wakeup.h */ | 18 | /* This should match the structure in wakeup.h */ |
19 | .section ".header", "a" | ||
14 | .globl wakeup_header | 20 | .globl wakeup_header |
15 | wakeup_header: | 21 | wakeup_header: |
16 | video_mode: .short 0 /* Video mode number */ | 22 | video_mode: .short 0 /* Video mode number */ |
@@ -30,14 +36,11 @@ wakeup_jmp: .byte 0xea /* ljmpw */ | |||
30 | wakeup_jmp_off: .word 3f | 36 | wakeup_jmp_off: .word 3f |
31 | wakeup_jmp_seg: .word 0 | 37 | wakeup_jmp_seg: .word 0 |
32 | wakeup_gdt: .quad 0, 0, 0 | 38 | wakeup_gdt: .quad 0, 0, 0 |
33 | signature: .long 0x51ee1111 | 39 | signature: .long WAKEUP_HEADER_SIGNATURE |
34 | 40 | ||
35 | .text | 41 | .text |
36 | .globl _start | ||
37 | .code16 | 42 | .code16 |
38 | wakeup_code: | 43 | wakeup_code: |
39 | _start: | ||
40 | cli | ||
41 | cld | 44 | cld |
42 | 45 | ||
43 | /* Apparently some dimwit BIOS programmers don't know how to | 46 | /* Apparently some dimwit BIOS programmers don't know how to |
@@ -77,12 +80,12 @@ _start: | |||
77 | 80 | ||
78 | /* Check header signature... */ | 81 | /* Check header signature... */ |
79 | movl signature, %eax | 82 | movl signature, %eax |
80 | cmpl $0x51ee1111, %eax | 83 | cmpl $WAKEUP_HEADER_SIGNATURE, %eax |
81 | jne bogus_real_magic | 84 | jne bogus_real_magic |
82 | 85 | ||
83 | /* Check we really have everything... */ | 86 | /* Check we really have everything... */ |
84 | movl end_signature, %eax | 87 | movl end_signature, %eax |
85 | cmpl $0x65a22c82, %eax | 88 | cmpl $WAKEUP_END_SIGNATURE, %eax |
86 | jne bogus_real_magic | 89 | jne bogus_real_magic |
87 | 90 | ||
88 | /* Call the C code */ | 91 | /* Call the C code */ |
@@ -147,3 +150,7 @@ wakeup_heap: | |||
147 | wakeup_stack: | 150 | wakeup_stack: |
148 | .space 2048 | 151 | .space 2048 |
149 | wakeup_stack_end: | 152 | wakeup_stack_end: |
153 | |||
154 | .section ".signature","a" | ||
155 | end_signature: | ||
156 | .long WAKEUP_END_SIGNATURE | ||
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index 69d38d0b2b64..e1828c07e79c 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -35,7 +35,8 @@ struct wakeup_header { | |||
35 | extern struct wakeup_header wakeup_header; | 35 | extern struct wakeup_header wakeup_header; |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define HEADER_OFFSET 0x3f00 | 38 | #define WAKEUP_HEADER_OFFSET 8 |
39 | #define WAKEUP_SIZE 0x4000 | 39 | #define WAKEUP_HEADER_SIGNATURE 0x51ee1111 |
40 | #define WAKEUP_END_SIGNATURE 0x65a22c82 | ||
40 | 41 | ||
41 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ | 42 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ |
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
index 060fff8f5c5b..d4f8010a5b1b 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -13,9 +13,19 @@ ENTRY(_start) | |||
13 | SECTIONS | 13 | SECTIONS |
14 | { | 14 | { |
15 | . = 0; | 15 | . = 0; |
16 | .jump : { | ||
17 | *(.jump) | ||
18 | } = 0x90909090 | ||
19 | |||
20 | . = WAKEUP_HEADER_OFFSET; | ||
21 | .header : { | ||
22 | *(.header) | ||
23 | } | ||
24 | |||
25 | . = ALIGN(16); | ||
16 | .text : { | 26 | .text : { |
17 | *(.text*) | 27 | *(.text*) |
18 | } | 28 | } = 0x90909090 |
19 | 29 | ||
20 | . = ALIGN(16); | 30 | . = ALIGN(16); |
21 | .rodata : { | 31 | .rodata : { |
@@ -33,11 +43,6 @@ SECTIONS | |||
33 | *(.data*) | 43 | *(.data*) |
34 | } | 44 | } |
35 | 45 | ||
36 | .signature : { | ||
37 | end_signature = .; | ||
38 | LONG(0x65a22c82) | ||
39 | } | ||
40 | |||
41 | . = ALIGN(16); | 46 | . = ALIGN(16); |
42 | .bss : { | 47 | .bss : { |
43 | __bss_start = .; | 48 | __bss_start = .; |
@@ -45,20 +50,13 @@ SECTIONS | |||
45 | __bss_end = .; | 50 | __bss_end = .; |
46 | } | 51 | } |
47 | 52 | ||
48 | . = HEADER_OFFSET; | 53 | .signature : { |
49 | .header : { | 54 | *(.signature) |
50 | *(.header) | ||
51 | } | 55 | } |
52 | 56 | ||
53 | . = ALIGN(16); | ||
54 | _end = .; | 57 | _end = .; |
55 | 58 | ||
56 | /DISCARD/ : { | 59 | /DISCARD/ : { |
57 | *(.note*) | 60 | *(.note*) |
58 | } | 61 | } |
59 | |||
60 | /* | ||
61 | * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: | ||
62 | */ | ||
63 | . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); | ||
64 | } | 62 | } |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 5f1b747f6ef1..ff93bc1b09c3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -18,12 +18,8 @@ | |||
18 | #include "realmode/wakeup.h" | 18 | #include "realmode/wakeup.h" |
19 | #include "sleep.h" | 19 | #include "sleep.h" |
20 | 20 | ||
21 | unsigned long acpi_wakeup_address; | ||
22 | unsigned long acpi_realmode_flags; | 21 | unsigned long acpi_realmode_flags; |
23 | 22 | ||
24 | /* address in low memory of the wakeup routine. */ | ||
25 | static unsigned long acpi_realmode; | ||
26 | |||
27 | #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) | 23 | #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) |
28 | static char temp_stack[4096]; | 24 | static char temp_stack[4096]; |
29 | #endif | 25 | #endif |
@@ -33,22 +29,17 @@ static char temp_stack[4096]; | |||
33 | * | 29 | * |
34 | * Create an identity mapped page table and copy the wakeup routine to | 30 | * Create an identity mapped page table and copy the wakeup routine to |
35 | * low memory. | 31 | * low memory. |
36 | * | ||
37 | * Note that this is too late to change acpi_wakeup_address. | ||
38 | */ | 32 | */ |
39 | int acpi_suspend_lowlevel(void) | 33 | int acpi_suspend_lowlevel(void) |
40 | { | 34 | { |
41 | struct wakeup_header *header; | 35 | struct wakeup_header *header; |
36 | /* address in low memory of the wakeup routine. */ | ||
37 | char *acpi_realmode; | ||
42 | 38 | ||
43 | if (!acpi_realmode) { | 39 | acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code); |
44 | printk(KERN_ERR "Could not allocate memory during boot, " | ||
45 | "S3 disabled\n"); | ||
46 | return -ENOMEM; | ||
47 | } | ||
48 | memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE); | ||
49 | 40 | ||
50 | header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET); | 41 | header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET); |
51 | if (header->signature != 0x51ee1111) { | 42 | if (header->signature != WAKEUP_HEADER_SIGNATURE) { |
52 | printk(KERN_ERR "wakeup header does not match\n"); | 43 | printk(KERN_ERR "wakeup header does not match\n"); |
53 | return -EINVAL; | 44 | return -EINVAL; |
54 | } | 45 | } |
@@ -68,9 +59,7 @@ int acpi_suspend_lowlevel(void) | |||
68 | /* GDT[0]: GDT self-pointer */ | 59 | /* GDT[0]: GDT self-pointer */ |
69 | header->wakeup_gdt[0] = | 60 | header->wakeup_gdt[0] = |
70 | (u64)(sizeof(header->wakeup_gdt) - 1) + | 61 | (u64)(sizeof(header->wakeup_gdt) - 1) + |
71 | ((u64)(acpi_wakeup_address + | 62 | ((u64)__pa(&header->wakeup_gdt) << 16); |
72 | ((char *)&header->wakeup_gdt - (char *)acpi_realmode)) | ||
73 | << 16); | ||
74 | /* GDT[1]: big real mode-like code segment */ | 63 | /* GDT[1]: big real mode-like code segment */ |
75 | header->wakeup_gdt[1] = | 64 | header->wakeup_gdt[1] = |
76 | GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); | 65 | GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); |
@@ -96,7 +85,7 @@ int acpi_suspend_lowlevel(void) | |||
96 | header->pmode_cr3 = (u32)__pa(&initial_page_table); | 85 | header->pmode_cr3 = (u32)__pa(&initial_page_table); |
97 | saved_magic = 0x12345678; | 86 | saved_magic = 0x12345678; |
98 | #else /* CONFIG_64BIT */ | 87 | #else /* CONFIG_64BIT */ |
99 | header->trampoline_segment = setup_trampoline() >> 4; | 88 | header->trampoline_segment = trampoline_address() >> 4; |
100 | #ifdef CONFIG_SMP | 89 | #ifdef CONFIG_SMP |
101 | stack_start = (unsigned long)temp_stack + sizeof(temp_stack); | 90 | stack_start = (unsigned long)temp_stack + sizeof(temp_stack); |
102 | early_gdt_descr.address = | 91 | early_gdt_descr.address = |
@@ -111,45 +100,6 @@ int acpi_suspend_lowlevel(void) | |||
111 | return 0; | 100 | return 0; |
112 | } | 101 | } |
113 | 102 | ||
114 | /** | ||
115 | * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation | ||
116 | * | ||
117 | * We allocate a page from the first 1MB of memory for the wakeup | ||
118 | * routine for when we come back from a sleep state. The | ||
119 | * runtime allocator allows specification of <16MB pages, but not | ||
120 | * <1MB pages. | ||
121 | */ | ||
122 | void __init acpi_reserve_wakeup_memory(void) | ||
123 | { | ||
124 | phys_addr_t mem; | ||
125 | |||
126 | if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { | ||
127 | printk(KERN_ERR | ||
128 | "ACPI: Wakeup code way too big, S3 disabled.\n"); | ||
129 | return; | ||
130 | } | ||
131 | |||
132 | mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); | ||
133 | |||
134 | if (mem == MEMBLOCK_ERROR) { | ||
135 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); | ||
136 | return; | ||
137 | } | ||
138 | acpi_realmode = (unsigned long) phys_to_virt(mem); | ||
139 | acpi_wakeup_address = mem; | ||
140 | memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); | ||
141 | } | ||
142 | |||
143 | int __init acpi_configure_wakeup_memory(void) | ||
144 | { | ||
145 | if (acpi_realmode) | ||
146 | set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | arch_initcall(acpi_configure_wakeup_memory); | ||
151 | |||
152 | |||
153 | static int __init acpi_sleep_setup(char *str) | 103 | static int __init acpi_sleep_setup(char *str) |
154 | { | 104 | { |
155 | while ((str != NULL) && (*str != '\0')) { | 105 | while ((str != NULL) && (*str != '\0')) { |
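Note on the rewritten GDT setup above: the GDT[0] slot doubles as a real-mode pseudo-descriptor, with the table limit in the low 16 bits and the 32-bit physical base in bits 16..47; the new code takes that base directly from __pa(&header->wakeup_gdt) since the wakeup blob is now linked into the kernel's trampoline region. A minimal stand-alone sketch of the packing, with illustrative names that are not part of the patch:

	/* Pack a GDT pseudo-descriptor: limit in bits 0..15, 32-bit base in bits 16..47. */
	#include <stdint.h>

	static uint64_t pack_gdt_self_pointer(uint32_t gdt_phys_base, uint16_t gdt_limit)
	{
		return (uint64_t)gdt_limit | ((uint64_t)gdt_phys_base << 16);
	}

In the hunk above, gdt_limit corresponds to sizeof(header->wakeup_gdt) - 1 and gdt_phys_base to the physical address of the table itself.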
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 31ce13f20297..416d4be13fef 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -4,13 +4,10 @@ | |||
4 | 4 | ||
5 | #include <asm/trampoline.h> | 5 | #include <asm/trampoline.h> |
6 | 6 | ||
7 | extern char wakeup_code_start, wakeup_code_end; | ||
8 | |||
9 | extern unsigned long saved_video_mode; | 7 | extern unsigned long saved_video_mode; |
10 | extern long saved_magic; | 8 | extern long saved_magic; |
11 | 9 | ||
12 | extern int wakeup_pmode_return; | 10 | extern int wakeup_pmode_return; |
13 | extern char swsusp_pg_dir[PAGE_SIZE]; | ||
14 | 11 | ||
15 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); | 12 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); |
16 | extern void wakeup_long64(void); | 13 | extern void wakeup_long64(void); |
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
index 6ff3b5730575..63b8ab524f2c 100644
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ b/arch/x86/kernel/acpi/wakeup_rm.S
@@ -2,9 +2,11 @@ | |||
2 | * Wrapper script for the realmode binary as a transport object | 2 | * Wrapper script for the realmode binary as a transport object |
3 | * before copying to low memory. | 3 | * before copying to low memory. |
4 | */ | 4 | */ |
5 | .section ".rodata","a" | 5 | #include <asm/page_types.h> |
6 | .globl wakeup_code_start, wakeup_code_end | 6 | |
7 | wakeup_code_start: | 7 | .section ".x86_trampoline","a" |
8 | .balign PAGE_SIZE | ||
9 | .globl acpi_wakeup_code | ||
10 | acpi_wakeup_code: | ||
8 | .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" | 11 | .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" |
9 | wakeup_code_end: | 12 | .size acpi_wakeup_code, .-acpi_wakeup_code |
10 | .size wakeup_code_start, .-wakeup_code_start | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 7038b95d363f..4a234677e213 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len); | |||
199 | 199 | ||
200 | /* Replace instructions with better alternatives for this CPU type. | 200 | /* Replace instructions with better alternatives for this CPU type. |
201 | This runs before SMP is initialized to avoid SMP problems with | 201 | This runs before SMP is initialized to avoid SMP problems with |
202 | self modifying code. This implies that assymetric systems where | 202 | self modifying code. This implies that asymmetric systems where |
203 | APs have less capabilities than the boot processor are not handled. | 203 | APs have less capabilities than the boot processor are not handled. |
204 | Tough. Make sure you disable such features by hand. */ | 204 | Tough. Make sure you disable such features by hand. */ |
205 | 205 | ||
@@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) | |||
620 | flush_icache_range((unsigned long)p->addr, | 620 | flush_icache_range((unsigned long)p->addr, |
621 | (unsigned long)p->addr + p->len); | 621 | (unsigned long)p->addr + p->len); |
622 | } | 622 | } |
623 | 623 | /* | |
624 | * Intel Archiecture Software Developer's Manual section 7.1.3 specifies | ||
625 | * that a core serializing instruction such as "cpuid" should be | ||
626 | * executed on _each_ core before the new instruction is made visible. | ||
627 | */ | ||
628 | sync_core(); | ||
624 | return 0; | 629 | return 0; |
625 | } | 630 | } |
626 | 631 | ||
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 0a99f7198bc3..6801959a8b2a 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,14 +12,19 @@ | |||
12 | 12 | ||
13 | static u32 *flush_words; | 13 | static u32 *flush_words; |
14 | 14 | ||
15 | struct pci_device_id amd_nb_misc_ids[] = { | 15 | const struct pci_device_id amd_nb_misc_ids[] = { |
16 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 16 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
17 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 17 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
18 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, | 18 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, |
19 | {} | 19 | {} |
20 | }; | 20 | }; |
21 | EXPORT_SYMBOL(amd_nb_misc_ids); | 21 | EXPORT_SYMBOL(amd_nb_misc_ids); |
22 | 22 | ||
23 | static struct pci_device_id amd_nb_link_ids[] = { | ||
24 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) }, | ||
25 | {} | ||
26 | }; | ||
27 | |||
23 | const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { | 28 | const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { |
24 | { 0x00, 0x18, 0x20 }, | 29 | { 0x00, 0x18, 0x20 }, |
25 | { 0xff, 0x00, 0x20 }, | 30 | { 0xff, 0x00, 0x20 }, |
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges; | |||
31 | EXPORT_SYMBOL(amd_northbridges); | 36 | EXPORT_SYMBOL(amd_northbridges); |
32 | 37 | ||
33 | static struct pci_dev *next_northbridge(struct pci_dev *dev, | 38 | static struct pci_dev *next_northbridge(struct pci_dev *dev, |
34 | struct pci_device_id *ids) | 39 | const struct pci_device_id *ids) |
35 | { | 40 | { |
36 | do { | 41 | do { |
37 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | 42 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); |
@@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev, | |||
43 | 48 | ||
44 | int amd_cache_northbridges(void) | 49 | int amd_cache_northbridges(void) |
45 | { | 50 | { |
46 | int i = 0; | 51 | u16 i = 0; |
47 | struct amd_northbridge *nb; | 52 | struct amd_northbridge *nb; |
48 | struct pci_dev *misc; | 53 | struct pci_dev *misc, *link; |
49 | 54 | ||
50 | if (amd_nb_num()) | 55 | if (amd_nb_num()) |
51 | return 0; | 56 | return 0; |
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void) | |||
64 | amd_northbridges.nb = nb; | 69 | amd_northbridges.nb = nb; |
65 | amd_northbridges.num = i; | 70 | amd_northbridges.num = i; |
66 | 71 | ||
67 | misc = NULL; | 72 | link = misc = NULL; |
68 | for (i = 0; i != amd_nb_num(); i++) { | 73 | for (i = 0; i != amd_nb_num(); i++) { |
69 | node_to_amd_nb(i)->misc = misc = | 74 | node_to_amd_nb(i)->misc = misc = |
70 | next_northbridge(misc, amd_nb_misc_ids); | 75 | next_northbridge(misc, amd_nb_misc_ids); |
76 | node_to_amd_nb(i)->link = link = | ||
77 | next_northbridge(link, amd_nb_link_ids); | ||
71 | } | 78 | } |
72 | 79 | ||
73 | /* some CPU families (e.g. family 0x11) do not support GART */ | 80 | /* some CPU families (e.g. family 0x11) do not support GART */ |
@@ -85,26 +92,95 @@ int amd_cache_northbridges(void) | |||
85 | boot_cpu_data.x86_mask >= 0x1)) | 92 | boot_cpu_data.x86_mask >= 0x1)) |
86 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | 93 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; |
87 | 94 | ||
95 | if (boot_cpu_data.x86 == 0x15) | ||
96 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | ||
97 | |||
98 | /* L3 cache partitioning is supported on family 0x15 */ | ||
99 | if (boot_cpu_data.x86 == 0x15) | ||
100 | amd_northbridges.flags |= AMD_NB_L3_PARTITIONING; | ||
101 | |||
88 | return 0; | 102 | return 0; |
89 | } | 103 | } |
90 | EXPORT_SYMBOL_GPL(amd_cache_northbridges); | 104 | EXPORT_SYMBOL_GPL(amd_cache_northbridges); |
91 | 105 | ||
92 | /* Ignores subdevice/subvendor but as far as I can figure out | 106 | /* |
93 | they're useless anyways */ | 107 | * Ignores subdevice/subvendor but as far as I can figure out |
94 | int __init early_is_amd_nb(u32 device) | 108 | * they're useless anyways |
109 | */ | ||
110 | bool __init early_is_amd_nb(u32 device) | ||
95 | { | 111 | { |
96 | struct pci_device_id *id; | 112 | const struct pci_device_id *id; |
97 | u32 vendor = device & 0xffff; | 113 | u32 vendor = device & 0xffff; |
114 | |||
98 | device >>= 16; | 115 | device >>= 16; |
99 | for (id = amd_nb_misc_ids; id->vendor; id++) | 116 | for (id = amd_nb_misc_ids; id->vendor; id++) |
100 | if (vendor == id->vendor && device == id->device) | 117 | if (vendor == id->vendor && device == id->device) |
101 | return 1; | 118 | return true; |
119 | return false; | ||
120 | } | ||
121 | |||
122 | int amd_get_subcaches(int cpu) | ||
123 | { | ||
124 | struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; | ||
125 | unsigned int mask; | ||
126 | int cuid = 0; | ||
127 | |||
128 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
129 | return 0; | ||
130 | |||
131 | pci_read_config_dword(link, 0x1d4, &mask); | ||
132 | |||
133 | #ifdef CONFIG_SMP | ||
134 | cuid = cpu_data(cpu).compute_unit_id; | ||
135 | #endif | ||
136 | return (mask >> (4 * cuid)) & 0xf; | ||
137 | } | ||
138 | |||
139 | int amd_set_subcaches(int cpu, int mask) | ||
140 | { | ||
141 | static unsigned int reset, ban; | ||
142 | struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); | ||
143 | unsigned int reg; | ||
144 | int cuid = 0; | ||
145 | |||
146 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* if necessary, collect reset state of L3 partitioning and BAN mode */ | ||
150 | if (reset == 0) { | ||
151 | pci_read_config_dword(nb->link, 0x1d4, &reset); | ||
152 | pci_read_config_dword(nb->misc, 0x1b8, &ban); | ||
153 | ban &= 0x180000; | ||
154 | } | ||
155 | |||
156 | /* deactivate BAN mode if any subcaches are to be disabled */ | ||
157 | if (mask != 0xf) { | ||
158 | pci_read_config_dword(nb->misc, 0x1b8, ®); | ||
159 | pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); | ||
160 | } | ||
161 | |||
162 | #ifdef CONFIG_SMP | ||
163 | cuid = cpu_data(cpu).compute_unit_id; | ||
164 | #endif | ||
165 | mask <<= 4 * cuid; | ||
166 | mask |= (0xf ^ (1 << cuid)) << 26; | ||
167 | |||
168 | pci_write_config_dword(nb->link, 0x1d4, mask); | ||
169 | |||
170 | /* reset BAN mode if L3 partitioning returned to reset state */ | ||
171 | pci_read_config_dword(nb->link, 0x1d4, ®); | ||
172 | if (reg == reset) { | ||
173 | pci_read_config_dword(nb->misc, 0x1b8, ®); | ||
174 | reg &= ~0x180000; | ||
175 | pci_write_config_dword(nb->misc, 0x1b8, reg | ban); | ||
176 | } | ||
177 | |||
102 | return 0; | 178 | return 0; |
103 | } | 179 | } |
104 | 180 | ||
105 | int amd_cache_gart(void) | 181 | static int amd_cache_gart(void) |
106 | { | 182 | { |
107 | int i; | 183 | u16 i; |
108 | 184 | ||
109 | if (!amd_nb_has_feature(AMD_NB_GART)) | 185 | if (!amd_nb_has_feature(AMD_NB_GART)) |
110 | return 0; | 186 | return 0; |
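As the amd_get_subcaches() hunk above suggests, the L3 partitioning register at PCI config offset 0x1d4 on the northbridge link function packs one 4-bit subcache-enable mask per compute unit, which is what the (mask >> (4 * cuid)) & 0xf expression extracts. A small sketch of that decode; the helper name is illustrative and assumed, not from the patch:

	/* One 4-bit subcache-enable mask per compute unit; low nibble = compute unit 0. */
	static inline unsigned int subcache_mask_for_cu(unsigned int reg_1d4, int cuid)
	{
		return (reg_1d4 >> (4 * cuid)) & 0xf;
	}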
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51d4e1663066..1293c709ee85 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta, | |||
508 | return 0; | 508 | return 0; |
509 | } | 509 | } |
510 | 510 | ||
511 | /* | ||
512 | * APB timer clock is not in sync with pclk on Langwell, which translates to | ||
513 | * unreliable read value caused by sampling error. the error does not add up | ||
514 | * overtime and only happens when sampling a 0 as a 1 by mistake. so the time | ||
515 | * would go backwards. the following code is trying to prevent time traveling | ||
516 | * backwards. little bit paranoid. | ||
517 | */ | ||
518 | static cycle_t apbt_read_clocksource(struct clocksource *cs) | 511 | static cycle_t apbt_read_clocksource(struct clocksource *cs) |
519 | { | 512 | { |
520 | unsigned long t0, t1, t2; | 513 | unsigned long current_count; |
521 | static unsigned long last_read; | 514 | |
522 | 515 | current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE); | |
523 | bad_count: | 516 | return (cycle_t)~current_count; |
524 | t1 = apbt_readl(phy_cs_timer_id, | ||
525 | APBTMR_N_CURRENT_VALUE); | ||
526 | t2 = apbt_readl(phy_cs_timer_id, | ||
527 | APBTMR_N_CURRENT_VALUE); | ||
528 | if (unlikely(t1 < t2)) { | ||
529 | pr_debug("APBT: read current count error %lx:%lx:%lx\n", | ||
530 | t1, t2, t2 - t1); | ||
531 | goto bad_count; | ||
532 | } | ||
533 | /* | ||
534 | * check against cached last read, makes sure time does not go back. | ||
535 | * it could be a normal rollover but we will do tripple check anyway | ||
536 | */ | ||
537 | if (unlikely(t2 > last_read)) { | ||
538 | /* check if we have a normal rollover */ | ||
539 | unsigned long raw_intr_status = | ||
540 | apbt_readl_reg(APBTMRS_RAW_INT_STATUS); | ||
541 | /* | ||
542 | * cs timer interrupt is masked but raw intr bit is set if | ||
543 | * rollover occurs. then we read EOI reg to clear it. | ||
544 | */ | ||
545 | if (raw_intr_status & (1 << phy_cs_timer_id)) { | ||
546 | apbt_readl(phy_cs_timer_id, APBTMR_N_EOI); | ||
547 | goto out; | ||
548 | } | ||
549 | pr_debug("APB CS going back %lx:%lx:%lx ", | ||
550 | t2, last_read, t2 - last_read); | ||
551 | bad_count_x3: | ||
552 | pr_debug("triple check enforced\n"); | ||
553 | t0 = apbt_readl(phy_cs_timer_id, | ||
554 | APBTMR_N_CURRENT_VALUE); | ||
555 | udelay(1); | ||
556 | t1 = apbt_readl(phy_cs_timer_id, | ||
557 | APBTMR_N_CURRENT_VALUE); | ||
558 | udelay(1); | ||
559 | t2 = apbt_readl(phy_cs_timer_id, | ||
560 | APBTMR_N_CURRENT_VALUE); | ||
561 | if ((t2 > t1) || (t1 > t0)) { | ||
562 | printk(KERN_ERR "Error: APB CS tripple check failed\n"); | ||
563 | goto bad_count_x3; | ||
564 | } | ||
565 | } | ||
566 | out: | ||
567 | last_read = t2; | ||
568 | return (cycle_t)~t2; | ||
569 | } | 517 | } |
570 | 518 | ||
571 | static int apbt_clocksource_register(void) | 519 | static int apbt_clocksource_register(void) |
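The simplified read path above relies on the APB timer being a down-counter: returning the one's complement of the current count yields a value that increases over time, as the clocksource framework expects, and the old triple-read workaround for sampling glitches is dropped. A minimal sketch of the conversion, with illustrative types and names:

	/* Convert a down-counter sample into a monotonically increasing cycle value. */
	static unsigned long long down_counter_to_cycles(unsigned long current_count)
	{
		return (unsigned long long)~current_count;
	}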
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 5955a7800a96..86d1ad4962a7 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/mmzone.h> | 17 | #include <linux/mmzone.h> |
18 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) | |||
57 | static u32 __init allocate_aperture(void) | 57 | static u32 __init allocate_aperture(void) |
58 | { | 58 | { |
59 | u32 aper_size; | 59 | u32 aper_size; |
60 | void *p; | 60 | unsigned long addr; |
61 | 61 | ||
62 | /* aper_size should <= 1G */ | 62 | /* aper_size should <= 1G */ |
63 | if (fallback_aper_order > 5) | 63 | if (fallback_aper_order > 5) |
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void) | |||
73 | /* | 73 | /* |
74 | * using 512M as goal, in case kexec will load kernel_big | 74 | * using 512M as goal, in case kexec will load kernel_big |
75 | * that will do the on position decompress, and could overlap with | 75 | * that will do the on position decompress, and could overlap with |
76 | * that positon with gart that is used. | 76 | * that position with gart that is used. |
77 | * sequende: | 77 | * sequende: |
78 | * kernel_small | 78 | * kernel_small |
79 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) | 79 | * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) |
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void) | |||
83 | * so don't use 512M below as gart iommu, leave the space for kernel | 83 | * so don't use 512M below as gart iommu, leave the space for kernel |
84 | * code for safe | 84 | * code for safe |
85 | */ | 85 | */ |
86 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 86 | addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); |
87 | if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { | ||
88 | printk(KERN_ERR | ||
89 | "Cannot allocate aperture memory hole (%lx,%uK)\n", | ||
90 | addr, aper_size>>10); | ||
91 | return 0; | ||
92 | } | ||
93 | memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); | ||
87 | /* | 94 | /* |
88 | * Kmemleak should not scan this block as it may not be mapped via the | 95 | * Kmemleak should not scan this block as it may not be mapped via the |
89 | * kernel direct mapping. | 96 | * kernel direct mapping. |
90 | */ | 97 | */ |
91 | kmemleak_ignore(p); | 98 | kmemleak_ignore(phys_to_virt(addr)); |
92 | if (!p || __pa(p)+aper_size > 0xffffffff) { | ||
93 | printk(KERN_ERR | ||
94 | "Cannot allocate aperture memory hole (%p,%uK)\n", | ||
95 | p, aper_size>>10); | ||
96 | if (p) | ||
97 | free_bootmem(__pa(p), aper_size); | ||
98 | return 0; | ||
99 | } | ||
100 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", | 99 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", |
101 | aper_size >> 10, __pa(p)); | 100 | aper_size >> 10, addr); |
102 | insert_aperture_resource((u32)__pa(p), aper_size); | 101 | insert_aperture_resource((u32)addr, aper_size); |
103 | register_nosave_region((u32)__pa(p) >> PAGE_SHIFT, | 102 | register_nosave_region(addr >> PAGE_SHIFT, |
104 | (u32)__pa(p+aper_size) >> PAGE_SHIFT); | 103 | (addr+aper_size) >> PAGE_SHIFT); |
105 | 104 | ||
106 | return (u32)__pa(p); | 105 | return (u32)addr; |
107 | } | 106 | } |
108 | 107 | ||
109 | 108 | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 76b96d74978a..966673f44141 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/i8259.h> | 43 | #include <asm/i8259.h> |
44 | #include <asm/proto.h> | 44 | #include <asm/proto.h> |
45 | #include <asm/apic.h> | 45 | #include <asm/apic.h> |
46 | #include <asm/io_apic.h> | ||
46 | #include <asm/desc.h> | 47 | #include <asm/desc.h> |
47 | #include <asm/hpet.h> | 48 | #include <asm/hpet.h> |
48 | #include <asm/idle.h> | 49 | #include <asm/idle.h> |
@@ -78,12 +79,21 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); | |||
78 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); | 79 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); |
79 | 80 | ||
80 | #ifdef CONFIG_X86_32 | 81 | #ifdef CONFIG_X86_32 |
82 | |||
83 | /* | ||
84 | * On x86_32, the mapping between cpu and logical apicid may vary | ||
85 | * depending on apic in use. The following early percpu variable is | ||
86 | * used for the mapping. This is where the behaviors of x86_64 and 32 | ||
87 | * actually diverge. Let's keep it ugly for now. | ||
88 | */ | ||
89 | DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID); | ||
90 | |||
81 | /* | 91 | /* |
82 | * Knob to control our willingness to enable the local APIC. | 92 | * Knob to control our willingness to enable the local APIC. |
83 | * | 93 | * |
84 | * +1=force-enable | 94 | * +1=force-enable |
85 | */ | 95 | */ |
86 | static int force_enable_local_apic; | 96 | static int force_enable_local_apic __initdata; |
87 | /* | 97 | /* |
88 | * APIC command line parameters | 98 | * APIC command line parameters |
89 | */ | 99 | */ |
@@ -153,7 +163,7 @@ early_param("nox2apic", setup_nox2apic); | |||
153 | unsigned long mp_lapic_addr; | 163 | unsigned long mp_lapic_addr; |
154 | int disable_apic; | 164 | int disable_apic; |
155 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ | 165 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ |
156 | static int disable_apic_timer __cpuinitdata; | 166 | static int disable_apic_timer __initdata; |
157 | /* Local APIC timer works in C2 */ | 167 | /* Local APIC timer works in C2 */ |
158 | int local_apic_timer_c2_ok; | 168 | int local_apic_timer_c2_ok; |
159 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | 169 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); |
@@ -177,29 +187,8 @@ static struct resource lapic_resource = { | |||
177 | 187 | ||
178 | static unsigned int calibration_result; | 188 | static unsigned int calibration_result; |
179 | 189 | ||
180 | static int lapic_next_event(unsigned long delta, | ||
181 | struct clock_event_device *evt); | ||
182 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
183 | struct clock_event_device *evt); | ||
184 | static void lapic_timer_broadcast(const struct cpumask *mask); | ||
185 | static void apic_pm_activate(void); | 190 | static void apic_pm_activate(void); |
186 | 191 | ||
187 | /* | ||
188 | * The local apic timer can be used for any function which is CPU local. | ||
189 | */ | ||
190 | static struct clock_event_device lapic_clockevent = { | ||
191 | .name = "lapic", | ||
192 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
193 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
194 | .shift = 32, | ||
195 | .set_mode = lapic_timer_setup, | ||
196 | .set_next_event = lapic_next_event, | ||
197 | .broadcast = lapic_timer_broadcast, | ||
198 | .rating = 100, | ||
199 | .irq = -1, | ||
200 | }; | ||
201 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
202 | |||
203 | static unsigned long apic_phys; | 192 | static unsigned long apic_phys; |
204 | 193 | ||
205 | /* | 194 | /* |
@@ -238,7 +227,7 @@ static int modern_apic(void) | |||
238 | * right after this call apic become NOOP driven | 227 | * right after this call apic become NOOP driven |
239 | * so apic->write/read doesn't do anything | 228 | * so apic->write/read doesn't do anything |
240 | */ | 229 | */ |
241 | void apic_disable(void) | 230 | static void __init apic_disable(void) |
242 | { | 231 | { |
243 | pr_info("APIC: switched to apic NOOP\n"); | 232 | pr_info("APIC: switched to apic NOOP\n"); |
244 | apic = &apic_noop; | 233 | apic = &apic_noop; |
@@ -282,23 +271,6 @@ u64 native_apic_icr_read(void) | |||
282 | return icr1 | ((u64)icr2 << 32); | 271 | return icr1 | ((u64)icr2 << 32); |
283 | } | 272 | } |
284 | 273 | ||
285 | /** | ||
286 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | ||
287 | */ | ||
288 | void __cpuinit enable_NMI_through_LVT0(void) | ||
289 | { | ||
290 | unsigned int v; | ||
291 | |||
292 | /* unmask and set to NMI */ | ||
293 | v = APIC_DM_NMI; | ||
294 | |||
295 | /* Level triggered for 82489DX (32bit mode) */ | ||
296 | if (!lapic_is_integrated()) | ||
297 | v |= APIC_LVT_LEVEL_TRIGGER; | ||
298 | |||
299 | apic_write(APIC_LVT0, v); | ||
300 | } | ||
301 | |||
302 | #ifdef CONFIG_X86_32 | 274 | #ifdef CONFIG_X86_32 |
303 | /** | 275 | /** |
304 | * get_physical_broadcast - Get number of physical broadcast IDs | 276 | * get_physical_broadcast - Get number of physical broadcast IDs |
@@ -508,6 +480,23 @@ static void lapic_timer_broadcast(const struct cpumask *mask) | |||
508 | #endif | 480 | #endif |
509 | } | 481 | } |
510 | 482 | ||
483 | |||
484 | /* | ||
485 | * The local apic timer can be used for any function which is CPU local. | ||
486 | */ | ||
487 | static struct clock_event_device lapic_clockevent = { | ||
488 | .name = "lapic", | ||
489 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
490 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
491 | .shift = 32, | ||
492 | .set_mode = lapic_timer_setup, | ||
493 | .set_next_event = lapic_next_event, | ||
494 | .broadcast = lapic_timer_broadcast, | ||
495 | .rating = 100, | ||
496 | .irq = -1, | ||
497 | }; | ||
498 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
499 | |||
511 | /* | 500 | /* |
512 | * Setup the local APIC timer for this CPU. Copy the initialized values | 501 | * Setup the local APIC timer for this CPU. Copy the initialized values |
513 | * of the boot CPU and register the clock event in the framework. | 502 | * of the boot CPU and register the clock event in the framework. |
@@ -1209,7 +1198,7 @@ void __cpuinit setup_local_APIC(void) | |||
1209 | rdtscll(tsc); | 1198 | rdtscll(tsc); |
1210 | 1199 | ||
1211 | if (disable_apic) { | 1200 | if (disable_apic) { |
1212 | arch_disable_smp_support(); | 1201 | disable_ioapic_support(); |
1213 | return; | 1202 | return; |
1214 | } | 1203 | } |
1215 | 1204 | ||
@@ -1237,6 +1226,19 @@ void __cpuinit setup_local_APIC(void) | |||
1237 | */ | 1226 | */ |
1238 | apic->init_apic_ldr(); | 1227 | apic->init_apic_ldr(); |
1239 | 1228 | ||
1229 | #ifdef CONFIG_X86_32 | ||
1230 | /* | ||
1231 | * APIC LDR is initialized. If logical_apicid mapping was | ||
1232 | * initialized during get_smp_config(), make sure it matches the | ||
1233 | * actual value. | ||
1234 | */ | ||
1235 | i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
1236 | WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); | ||
1237 | /* always use the value from LDR */ | ||
1238 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
1239 | logical_smp_processor_id(); | ||
1240 | #endif | ||
1241 | |||
1240 | /* | 1242 | /* |
1241 | * Set Task Priority to 'accept all'. We never change this | 1243 | * Set Task Priority to 'accept all'. We never change this |
1242 | * later on. | 1244 | * later on. |
@@ -1448,7 +1450,7 @@ int __init enable_IR(void) | |||
1448 | void __init enable_IR_x2apic(void) | 1450 | void __init enable_IR_x2apic(void) |
1449 | { | 1451 | { |
1450 | unsigned long flags; | 1452 | unsigned long flags; |
1451 | struct IO_APIC_route_entry **ioapic_entries = NULL; | 1453 | struct IO_APIC_route_entry **ioapic_entries; |
1452 | int ret, x2apic_enabled = 0; | 1454 | int ret, x2apic_enabled = 0; |
1453 | int dmar_table_init_ret; | 1455 | int dmar_table_init_ret; |
1454 | 1456 | ||
@@ -1537,7 +1539,7 @@ static int __init detect_init_APIC(void) | |||
1537 | } | 1539 | } |
1538 | #else | 1540 | #else |
1539 | 1541 | ||
1540 | static int apic_verify(void) | 1542 | static int __init apic_verify(void) |
1541 | { | 1543 | { |
1542 | u32 features, h, l; | 1544 | u32 features, h, l; |
1543 | 1545 | ||
@@ -1562,7 +1564,7 @@ static int apic_verify(void) | |||
1562 | return 0; | 1564 | return 0; |
1563 | } | 1565 | } |
1564 | 1566 | ||
1565 | int apic_force_enable(void) | 1567 | int __init apic_force_enable(unsigned long addr) |
1566 | { | 1568 | { |
1567 | u32 h, l; | 1569 | u32 h, l; |
1568 | 1570 | ||
@@ -1578,7 +1580,7 @@ int apic_force_enable(void) | |||
1578 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { | 1580 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { |
1579 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); | 1581 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); |
1580 | l &= ~MSR_IA32_APICBASE_BASE; | 1582 | l &= ~MSR_IA32_APICBASE_BASE; |
1581 | l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; | 1583 | l |= MSR_IA32_APICBASE_ENABLE | addr; |
1582 | wrmsr(MSR_IA32_APICBASE, l, h); | 1584 | wrmsr(MSR_IA32_APICBASE, l, h); |
1583 | enabled_via_apicbase = 1; | 1585 | enabled_via_apicbase = 1; |
1584 | } | 1586 | } |
@@ -1619,7 +1621,7 @@ static int __init detect_init_APIC(void) | |||
1619 | "you can enable it with \"lapic\"\n"); | 1621 | "you can enable it with \"lapic\"\n"); |
1620 | return -1; | 1622 | return -1; |
1621 | } | 1623 | } |
1622 | if (apic_force_enable()) | 1624 | if (apic_force_enable(APIC_DEFAULT_PHYS_BASE)) |
1623 | return -1; | 1625 | return -1; |
1624 | } else { | 1626 | } else { |
1625 | if (apic_verify()) | 1627 | if (apic_verify()) |
@@ -1930,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1930 | { | 1932 | { |
1931 | int cpu; | 1933 | int cpu; |
1932 | 1934 | ||
1933 | /* | ||
1934 | * Validate version | ||
1935 | */ | ||
1936 | if (version == 0x0) { | ||
1937 | pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " | ||
1938 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
1939 | version); | ||
1940 | version = 0x10; | ||
1941 | } | ||
1942 | apic_version[apicid] = version; | ||
1943 | |||
1944 | if (num_processors >= nr_cpu_ids) { | 1935 | if (num_processors >= nr_cpu_ids) { |
1945 | int max = nr_cpu_ids; | 1936 | int max = nr_cpu_ids; |
1946 | int thiscpu = max + disabled_cpus; | 1937 | int thiscpu = max + disabled_cpus; |
@@ -1954,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1954 | } | 1945 | } |
1955 | 1946 | ||
1956 | num_processors++; | 1947 | num_processors++; |
1957 | cpu = cpumask_next_zero(-1, cpu_present_mask); | ||
1958 | |||
1959 | if (version != apic_version[boot_cpu_physical_apicid]) | ||
1960 | WARN_ONCE(1, | ||
1961 | "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", | ||
1962 | apic_version[boot_cpu_physical_apicid], cpu, version); | ||
1963 | |||
1964 | physid_set(apicid, phys_cpu_present_map); | ||
1965 | if (apicid == boot_cpu_physical_apicid) { | 1948 | if (apicid == boot_cpu_physical_apicid) { |
1966 | /* | 1949 | /* |
1967 | * x86_bios_cpu_apicid is required to have processors listed | 1950 | * x86_bios_cpu_apicid is required to have processors listed |
1968 | * in same order as logical cpu numbers. Hence the first | 1951 | * in same order as logical cpu numbers. Hence the first |
1969 | * entry is BSP, and so on. | 1952 | * entry is BSP, and so on. |
1953 | * boot_cpu_init() already hold bit 0 in cpu_present_mask | ||
1954 | * for BSP. | ||
1970 | */ | 1955 | */ |
1971 | cpu = 0; | 1956 | cpu = 0; |
1957 | } else | ||
1958 | cpu = cpumask_next_zero(-1, cpu_present_mask); | ||
1959 | |||
1960 | /* | ||
1961 | * Validate version | ||
1962 | */ | ||
1963 | if (version == 0x0) { | ||
1964 | pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n", | ||
1965 | cpu, apicid); | ||
1966 | version = 0x10; | ||
1972 | } | 1967 | } |
1968 | apic_version[apicid] = version; | ||
1969 | |||
1970 | if (version != apic_version[boot_cpu_physical_apicid]) { | ||
1971 | pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n", | ||
1972 | apic_version[boot_cpu_physical_apicid], cpu, version); | ||
1973 | } | ||
1974 | |||
1975 | physid_set(apicid, phys_cpu_present_map); | ||
1973 | if (apicid > max_physical_apicid) | 1976 | if (apicid > max_physical_apicid) |
1974 | max_physical_apicid = apicid; | 1977 | max_physical_apicid = apicid; |
1975 | 1978 | ||
@@ -1977,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1977 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 1980 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; |
1978 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | 1981 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; |
1979 | #endif | 1982 | #endif |
1980 | 1983 | #ifdef CONFIG_X86_32 | |
1984 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
1985 | apic->x86_32_early_logical_apicid(cpu); | ||
1986 | #endif | ||
1981 | set_cpu_possible(cpu, true); | 1987 | set_cpu_possible(cpu, true); |
1982 | set_cpu_present(cpu, true); | 1988 | set_cpu_present(cpu, true); |
1983 | } | 1989 | } |
@@ -1998,10 +2004,14 @@ void default_init_apic_ldr(void) | |||
1998 | } | 2004 | } |
1999 | 2005 | ||
2000 | #ifdef CONFIG_X86_32 | 2006 | #ifdef CONFIG_X86_32 |
2001 | int default_apicid_to_node(int logical_apicid) | 2007 | int default_x86_32_numa_cpu_node(int cpu) |
2002 | { | 2008 | { |
2003 | #ifdef CONFIG_SMP | 2009 | #ifdef CONFIG_NUMA |
2004 | return apicid_2_node[hard_smp_processor_id()]; | 2010 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); |
2011 | |||
2012 | if (apicid != BAD_APICID) | ||
2013 | return __apicid_to_node[apicid]; | ||
2014 | return NUMA_NO_NODE; | ||
2005 | #else | 2015 | #else |
2006 | return 0; | 2016 | return 0; |
2007 | #endif | 2017 | #endif |
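On 32-bit, the cpu-to-logical-apicid mapping introduced above lives in the early percpu variable x86_cpu_to_logical_apicid (initialized to BAD_APICID) and is filled via apic->x86_32_early_logical_apicid() in generic_processor_info(). An illustrative reader, not part of the patch, showing the lookup pattern the converted call sites follow:

	static int example_logical_apicid(int cpu)
	{
		int id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		/* Fall back to the LDR-derived id if the entry was never filled in. */
		return (id != BAD_APICID) ? id : logical_smp_processor_id();
	}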
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 09d3b17ce0c2..5652d31fe108 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -185,8 +185,6 @@ struct apic apic_flat = { | |||
185 | .ioapic_phys_id_map = NULL, | 185 | .ioapic_phys_id_map = NULL, |
186 | .setup_apic_routing = NULL, | 186 | .setup_apic_routing = NULL, |
187 | .multi_timer_check = NULL, | 187 | .multi_timer_check = NULL, |
188 | .apicid_to_node = NULL, | ||
189 | .cpu_to_logical_apicid = NULL, | ||
190 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 188 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
191 | .apicid_to_cpu_present = NULL, | 189 | .apicid_to_cpu_present = NULL, |
192 | .setup_portio_remap = NULL, | 190 | .setup_portio_remap = NULL, |
@@ -337,8 +335,6 @@ struct apic apic_physflat = { | |||
337 | .ioapic_phys_id_map = NULL, | 335 | .ioapic_phys_id_map = NULL, |
338 | .setup_apic_routing = NULL, | 336 | .setup_apic_routing = NULL, |
339 | .multi_timer_check = NULL, | 337 | .multi_timer_check = NULL, |
340 | .apicid_to_node = NULL, | ||
341 | .cpu_to_logical_apicid = NULL, | ||
342 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 338 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
343 | .apicid_to_cpu_present = NULL, | 339 | .apicid_to_cpu_present = NULL, |
344 | .setup_portio_remap = NULL, | 340 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index e31b9ffe25f5..f1baa2dc087a 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void) | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | static int noop_cpu_to_logical_apicid(int cpu) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static int noop_phys_pkg_id(int cpuid_apic, int index_msb) | 57 | static int noop_phys_pkg_id(int cpuid_apic, int index_msb) |
63 | { | 58 | { |
64 | return 0; | 59 | return 0; |
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) | |||
113 | cpumask_set_cpu(cpu, retmask); | 108 | cpumask_set_cpu(cpu, retmask); |
114 | } | 109 | } |
115 | 110 | ||
116 | int noop_apicid_to_node(int logical_apicid) | ||
117 | { | ||
118 | /* we're always on node 0 */ | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static u32 noop_apic_read(u32 reg) | 111 | static u32 noop_apic_read(u32 reg) |
123 | { | 112 | { |
124 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); | 113 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); |
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v) | |||
130 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); | 119 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); |
131 | } | 120 | } |
132 | 121 | ||
122 | #ifdef CONFIG_X86_32 | ||
123 | static int noop_x86_32_numa_cpu_node(int cpu) | ||
124 | { | ||
125 | /* we're always on node 0 */ | ||
126 | return 0; | ||
127 | } | ||
128 | #endif | ||
129 | |||
133 | struct apic apic_noop = { | 130 | struct apic apic_noop = { |
134 | .name = "noop", | 131 | .name = "noop", |
135 | .probe = noop_probe, | 132 | .probe = noop_probe, |
@@ -153,9 +150,7 @@ struct apic apic_noop = { | |||
153 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 150 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
154 | .setup_apic_routing = NULL, | 151 | .setup_apic_routing = NULL, |
155 | .multi_timer_check = NULL, | 152 | .multi_timer_check = NULL, |
156 | .apicid_to_node = noop_apicid_to_node, | ||
157 | 153 | ||
158 | .cpu_to_logical_apicid = noop_cpu_to_logical_apicid, | ||
159 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 154 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
160 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 155 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
161 | 156 | ||
@@ -197,4 +192,9 @@ struct apic apic_noop = { | |||
197 | .icr_write = noop_apic_icr_write, | 192 | .icr_write = noop_apic_icr_write, |
198 | .wait_icr_idle = noop_apic_wait_icr_idle, | 193 | .wait_icr_idle = noop_apic_wait_icr_idle, |
199 | .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, | 194 | .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, |
195 | |||
196 | #ifdef CONFIG_X86_32 | ||
197 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, | ||
198 | .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, | ||
199 | #endif | ||
200 | }; | 200 | }; |
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index cb804c5091b9..541a2e431659 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit) | |||
45 | return 1; | 45 | return 1; |
46 | } | 46 | } |
47 | 47 | ||
48 | static int bigsmp_early_logical_apicid(int cpu) | ||
49 | { | ||
50 | /* on bigsmp, logical apicid is the same as physical */ | ||
51 | return early_per_cpu(x86_cpu_to_apicid, cpu); | ||
52 | } | ||
53 | |||
48 | static inline unsigned long calculate_ldr(int cpu) | 54 | static inline unsigned long calculate_ldr(int cpu) |
49 | { | 55 | { |
50 | unsigned long val, id; | 56 | unsigned long val, id; |
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void) | |||
80 | nr_ioapics); | 86 | nr_ioapics); |
81 | } | 87 | } |
82 | 88 | ||
83 | static int bigsmp_apicid_to_node(int logical_apicid) | ||
84 | { | ||
85 | return apicid_2_node[hard_smp_processor_id()]; | ||
86 | } | ||
87 | |||
88 | static int bigsmp_cpu_present_to_apicid(int mps_cpu) | 89 | static int bigsmp_cpu_present_to_apicid(int mps_cpu) |
89 | { | 90 | { |
90 | if (mps_cpu < nr_cpu_ids) | 91 | if (mps_cpu < nr_cpu_ids) |
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu) | |||
93 | return BAD_APICID; | 94 | return BAD_APICID; |
94 | } | 95 | } |
95 | 96 | ||
96 | /* Mapping from cpu number to logical apicid */ | ||
97 | static inline int bigsmp_cpu_to_logical_apicid(int cpu) | ||
98 | { | ||
99 | if (cpu >= nr_cpu_ids) | ||
100 | return BAD_APICID; | ||
101 | return cpu_physical_id(cpu); | ||
102 | } | ||
103 | |||
104 | static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) | 97 | static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
105 | { | 98 | { |
106 | /* For clustered we don't have a good way to do this yet - hack */ | 99 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid) | |||
115 | /* As we are using single CPU as destination, pick only one CPU here */ | 108 | /* As we are using single CPU as destination, pick only one CPU here */ |
116 | static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) | 109 | static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) |
117 | { | 110 | { |
118 | return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask)); | 111 | int cpu = cpumask_first(cpumask); |
112 | |||
113 | if (cpu < nr_cpu_ids) | ||
114 | return cpu_physical_id(cpu); | ||
115 | return BAD_APICID; | ||
119 | } | 116 | } |
120 | 117 | ||
121 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | 118 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |||
129 | */ | 126 | */ |
130 | for_each_cpu_and(cpu, cpumask, andmask) { | 127 | for_each_cpu_and(cpu, cpumask, andmask) { |
131 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | 128 | if (cpumask_test_cpu(cpu, cpu_online_mask)) |
132 | break; | 129 | return cpu_physical_id(cpu); |
133 | } | 130 | } |
134 | return bigsmp_cpu_to_logical_apicid(cpu); | 131 | return BAD_APICID; |
135 | } | 132 | } |
136 | 133 | ||
137 | static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) | 134 | static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) |
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = { | |||
219 | .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, | 216 | .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, |
220 | .setup_apic_routing = bigsmp_setup_apic_routing, | 217 | .setup_apic_routing = bigsmp_setup_apic_routing, |
221 | .multi_timer_check = NULL, | 218 | .multi_timer_check = NULL, |
222 | .apicid_to_node = bigsmp_apicid_to_node, | ||
223 | .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, | ||
224 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, | 219 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, |
225 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 220 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
226 | .setup_portio_remap = NULL, | 221 | .setup_portio_remap = NULL, |
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = { | |||
256 | .icr_write = native_apic_icr_write, | 251 | .icr_write = native_apic_icr_write, |
257 | .wait_icr_idle = native_apic_wait_icr_idle, | 252 | .wait_icr_idle = native_apic_wait_icr_idle, |
258 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 253 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
254 | |||
255 | .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, | ||
256 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
259 | }; | 257 | }; |
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 8593582d8022..3e9de4854c5b 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit) | |||
460 | return physid_isset(bit, phys_cpu_present_map); | 460 | return physid_isset(bit, phys_cpu_present_map); |
461 | } | 461 | } |
462 | 462 | ||
463 | static int es7000_early_logical_apicid(int cpu) | ||
464 | { | ||
465 | /* on es7000, logical apicid is the same as physical */ | ||
466 | return early_per_cpu(x86_bios_cpu_apicid, cpu); | ||
467 | } | ||
468 | |||
463 | static unsigned long calculate_ldr(int cpu) | 469 | static unsigned long calculate_ldr(int cpu) |
464 | { | 470 | { |
465 | unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); | 471 | unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); |
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void) | |||
504 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); | 510 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); |
505 | } | 511 | } |
506 | 512 | ||
507 | static int es7000_apicid_to_node(int logical_apicid) | 513 | static int es7000_numa_cpu_node(int cpu) |
508 | { | 514 | { |
509 | return 0; | 515 | return 0; |
510 | } | 516 | } |
511 | 517 | ||
512 | |||
513 | static int es7000_cpu_present_to_apicid(int mps_cpu) | 518 | static int es7000_cpu_present_to_apicid(int mps_cpu) |
514 | { | 519 | { |
515 | if (!mps_cpu) | 520 | if (!mps_cpu) |
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) | |||
528 | ++cpu_id; | 533 | ++cpu_id; |
529 | } | 534 | } |
530 | 535 | ||
531 | /* Mapping from cpu number to logical apicid */ | ||
532 | static int es7000_cpu_to_logical_apicid(int cpu) | ||
533 | { | ||
534 | #ifdef CONFIG_SMP | ||
535 | if (cpu >= nr_cpu_ids) | ||
536 | return BAD_APICID; | ||
537 | return cpu_2_logical_apicid[cpu]; | ||
538 | #else | ||
539 | return logical_smp_processor_id(); | ||
540 | #endif | ||
541 | } | ||
542 | |||
543 | static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) | 536 | static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
544 | { | 537 | { |
545 | /* For clustered we don't have a good way to do this yet - hack */ | 538 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
561 | * The cpus in the mask must all be on the apic cluster. | 554 | * The cpus in the mask must all be on the apic cluster. |
562 | */ | 555 | */ |
563 | for_each_cpu(cpu, cpumask) { | 556 | for_each_cpu(cpu, cpumask) { |
564 | int new_apicid = es7000_cpu_to_logical_apicid(cpu); | 557 | int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); |
565 | 558 | ||
566 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | 559 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
567 | WARN(1, "Not a valid mask!"); | 560 | WARN(1, "Not a valid mask!"); |
@@ -578,7 +571,7 @@ static unsigned int | |||
578 | es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, | 571 | es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, |
579 | const struct cpumask *andmask) | 572 | const struct cpumask *andmask) |
580 | { | 573 | { |
581 | int apicid = es7000_cpu_to_logical_apicid(0); | 574 | int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); |
582 | cpumask_var_t cpumask; | 575 | cpumask_var_t cpumask; |
583 | 576 | ||
584 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | 577 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) |
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = { | |||
655 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, | 648 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, |
656 | .setup_apic_routing = es7000_setup_apic_routing, | 649 | .setup_apic_routing = es7000_setup_apic_routing, |
657 | .multi_timer_check = NULL, | 650 | .multi_timer_check = NULL, |
658 | .apicid_to_node = es7000_apicid_to_node, | ||
659 | .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, | ||
660 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, | 651 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, |
661 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, | 652 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, |
662 | .setup_portio_remap = NULL, | 653 | .setup_portio_remap = NULL, |
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = { | |||
695 | .icr_write = native_apic_icr_write, | 686 | .icr_write = native_apic_icr_write, |
696 | .wait_icr_idle = native_apic_wait_icr_idle, | 687 | .wait_icr_idle = native_apic_wait_icr_idle, |
697 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 688 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
689 | |||
690 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | ||
691 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
698 | }; | 692 | }; |
699 | 693 | ||
700 | struct apic __refdata apic_es7000 = { | 694 | struct apic __refdata apic_es7000 = { |
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = { | |||
720 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, | 714 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, |
721 | .setup_apic_routing = es7000_setup_apic_routing, | 715 | .setup_apic_routing = es7000_setup_apic_routing, |
722 | .multi_timer_check = NULL, | 716 | .multi_timer_check = NULL, |
723 | .apicid_to_node = es7000_apicid_to_node, | ||
724 | .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, | ||
725 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, | 717 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, |
726 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, | 718 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, |
727 | .setup_portio_remap = NULL, | 719 | .setup_portio_remap = NULL, |
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = { | |||
758 | .icr_write = native_apic_icr_write, | 750 | .icr_write = native_apic_icr_write, |
759 | .wait_icr_idle = native_apic_wait_icr_idle, | 751 | .wait_icr_idle = native_apic_wait_icr_idle, |
760 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 752 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
753 | |||
754 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | ||
755 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
761 | }; | 756 | }; |
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 79fd43ca6f96..c4e557a1ebb6 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | |||
83 | arch_spin_lock(&lock); | 83 | arch_spin_lock(&lock); |
84 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 84 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
85 | show_regs(regs); | 85 | show_regs(regs); |
86 | dump_stack(); | ||
87 | arch_spin_unlock(&lock); | 86 | arch_spin_unlock(&lock); |
88 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 87 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
89 | return NOTIFY_STOP; | 88 | return NOTIFY_STOP; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ca9e2a3545a9..180ca240e03c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | |||
108 | 108 | ||
109 | int skip_ioapic_setup; | 109 | int skip_ioapic_setup; |
110 | 110 | ||
111 | void arch_disable_smp_support(void) | 111 | /** |
112 | * disable_ioapic_support() - disables ioapic support at runtime | ||
113 | */ | ||
114 | void disable_ioapic_support(void) | ||
112 | { | 115 | { |
113 | #ifdef CONFIG_PCI | 116 | #ifdef CONFIG_PCI |
114 | noioapicquirk = 1; | 117 | noioapicquirk = 1; |
@@ -120,11 +123,14 @@ void arch_disable_smp_support(void) | |||
120 | static int __init parse_noapic(char *str) | 123 | static int __init parse_noapic(char *str) |
121 | { | 124 | { |
122 | /* disable IO-APIC */ | 125 | /* disable IO-APIC */ |
123 | arch_disable_smp_support(); | 126 | disable_ioapic_support(); |
124 | return 0; | 127 | return 0; |
125 | } | 128 | } |
126 | early_param("noapic", parse_noapic); | 129 | early_param("noapic", parse_noapic); |
127 | 130 | ||
131 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
132 | struct io_apic_irq_attr *attr); | ||
133 | |||
128 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ | 134 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ |
129 | void mp_save_irq(struct mpc_intsrc *m) | 135 | void mp_save_irq(struct mpc_intsrc *m) |
130 | { | 136 | { |
@@ -181,7 +187,7 @@ int __init arch_early_irq_init(void) | |||
181 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | 187 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); |
182 | 188 | ||
183 | for (i = 0; i < count; i++) { | 189 | for (i = 0; i < count; i++) { |
184 | set_irq_chip_data(i, &cfg[i]); | 190 | irq_set_chip_data(i, &cfg[i]); |
185 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); | 191 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
186 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); | 192 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
187 | /* | 193 | /* |
@@ -200,7 +206,7 @@ int __init arch_early_irq_init(void) | |||
200 | #ifdef CONFIG_SPARSE_IRQ | 206 | #ifdef CONFIG_SPARSE_IRQ |
201 | static struct irq_cfg *irq_cfg(unsigned int irq) | 207 | static struct irq_cfg *irq_cfg(unsigned int irq) |
202 | { | 208 | { |
203 | return get_irq_chip_data(irq); | 209 | return irq_get_chip_data(irq); |
204 | } | 210 | } |
205 | 211 | ||
206 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) | 212 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
@@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) | |||
226 | { | 232 | { |
227 | if (!cfg) | 233 | if (!cfg) |
228 | return; | 234 | return; |
229 | set_irq_chip_data(at, NULL); | 235 | irq_set_chip_data(at, NULL); |
230 | free_cpumask_var(cfg->domain); | 236 | free_cpumask_var(cfg->domain); |
231 | free_cpumask_var(cfg->old_domain); | 237 | free_cpumask_var(cfg->old_domain); |
232 | kfree(cfg); | 238 | kfree(cfg); |
@@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | |||
256 | if (res < 0) { | 262 | if (res < 0) { |
257 | if (res != -EEXIST) | 263 | if (res != -EEXIST) |
258 | return NULL; | 264 | return NULL; |
259 | cfg = get_irq_chip_data(at); | 265 | cfg = irq_get_chip_data(at); |
260 | if (cfg) | 266 | if (cfg) |
261 | return cfg; | 267 | return cfg; |
262 | } | 268 | } |
263 | 269 | ||
264 | cfg = alloc_irq_cfg(at, node); | 270 | cfg = alloc_irq_cfg(at, node); |
265 | if (cfg) | 271 | if (cfg) |
266 | set_irq_chip_data(at, cfg); | 272 | irq_set_chip_data(at, cfg); |
267 | else | 273 | else |
268 | irq_free_desc(at); | 274 | irq_free_desc(at); |
269 | return cfg; | 275 | return cfg; |
@@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq) | |||
818 | #define default_MCA_trigger(idx) (1) | 824 | #define default_MCA_trigger(idx) (1) |
819 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) | 825 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) |
820 | 826 | ||
821 | static int MPBIOS_polarity(int idx) | 827 | static int irq_polarity(int idx) |
822 | { | 828 | { |
823 | int bus = mp_irqs[idx].srcbus; | 829 | int bus = mp_irqs[idx].srcbus; |
824 | int polarity; | 830 | int polarity; |
@@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx) | |||
860 | return polarity; | 866 | return polarity; |
861 | } | 867 | } |
862 | 868 | ||
863 | static int MPBIOS_trigger(int idx) | 869 | static int irq_trigger(int idx) |
864 | { | 870 | { |
865 | int bus = mp_irqs[idx].srcbus; | 871 | int bus = mp_irqs[idx].srcbus; |
866 | int trigger; | 872 | int trigger; |
@@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx) | |||
932 | return trigger; | 938 | return trigger; |
933 | } | 939 | } |
934 | 940 | ||
935 | static inline int irq_polarity(int idx) | ||
936 | { | ||
937 | return MPBIOS_polarity(idx); | ||
938 | } | ||
939 | |||
940 | static inline int irq_trigger(int idx) | ||
941 | { | ||
942 | return MPBIOS_trigger(idx); | ||
943 | } | ||
944 | |||
945 | static int pin_2_irq(int idx, int apic, int pin) | 941 | static int pin_2_irq(int idx, int apic, int pin) |
946 | { | 942 | { |
947 | int irq; | 943 | int irq; |
@@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu) | |||
1189 | raw_spin_lock(&vector_lock); | 1185 | raw_spin_lock(&vector_lock); |
1190 | /* Mark the inuse vectors */ | 1186 | /* Mark the inuse vectors */ |
1191 | for_each_active_irq(irq) { | 1187 | for_each_active_irq(irq) { |
1192 | cfg = get_irq_chip_data(irq); | 1188 | cfg = irq_get_chip_data(irq); |
1193 | if (!cfg) | 1189 | if (!cfg) |
1194 | continue; | 1190 | continue; |
1195 | /* | 1191 | /* |
@@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu) | |||
1220 | static struct irq_chip ioapic_chip; | 1216 | static struct irq_chip ioapic_chip; |
1221 | static struct irq_chip ir_ioapic_chip; | 1217 | static struct irq_chip ir_ioapic_chip; |
1222 | 1218 | ||
1223 | #define IOAPIC_AUTO -1 | ||
1224 | #define IOAPIC_EDGE 0 | ||
1225 | #define IOAPIC_LEVEL 1 | ||
1226 | |||
1227 | #ifdef CONFIG_X86_32 | 1219 | #ifdef CONFIG_X86_32 |
1228 | static inline int IO_APIC_irq_trigger(int irq) | 1220 | static inline int IO_APIC_irq_trigger(int irq) |
1229 | { | 1221 | { |
@@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
1248 | } | 1240 | } |
1249 | #endif | 1241 | #endif |
1250 | 1242 | ||
1251 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) | 1243 | static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, |
1244 | unsigned long trigger) | ||
1252 | { | 1245 | { |
1246 | struct irq_chip *chip = &ioapic_chip; | ||
1247 | irq_flow_handler_t hdl; | ||
1248 | bool fasteoi; | ||
1253 | 1249 | ||
1254 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1250 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1255 | trigger == IOAPIC_LEVEL) | 1251 | trigger == IOAPIC_LEVEL) { |
1256 | irq_set_status_flags(irq, IRQ_LEVEL); | 1252 | irq_set_status_flags(irq, IRQ_LEVEL); |
1257 | else | 1253 | fasteoi = true; |
1254 | } else { | ||
1258 | irq_clear_status_flags(irq, IRQ_LEVEL); | 1255 | irq_clear_status_flags(irq, IRQ_LEVEL); |
1256 | fasteoi = false; | ||
1257 | } | ||
1259 | 1258 | ||
1260 | if (irq_remapped(get_irq_chip_data(irq))) { | 1259 | if (irq_remapped(cfg)) { |
1261 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 1260 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
1262 | if (trigger) | 1261 | chip = &ir_ioapic_chip; |
1263 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1262 | fasteoi = trigger != 0; |
1264 | handle_fasteoi_irq, | ||
1265 | "fasteoi"); | ||
1266 | else | ||
1267 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
1268 | handle_edge_irq, "edge"); | ||
1269 | return; | ||
1270 | } | 1263 | } |
1271 | 1264 | ||
1272 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1265 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; |
1273 | trigger == IOAPIC_LEVEL) | 1266 | irq_set_chip_and_handler_name(irq, chip, hdl, |
1274 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 1267 | fasteoi ? "fasteoi" : "edge"); |
1275 | handle_fasteoi_irq, | ||
1276 | "fasteoi"); | ||
1277 | else | ||
1278 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
1279 | handle_edge_irq, "edge"); | ||
1280 | } | 1268 | } |
1281 | 1269 | ||
1282 | static int setup_ioapic_entry(int apic_id, int irq, | 1270 | static int setup_ioapic_entry(int apic_id, int irq, |
@@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, | |||
1374 | return; | 1362 | return; |
1375 | } | 1363 | } |
1376 | 1364 | ||
1377 | ioapic_register_intr(irq, trigger); | 1365 | ioapic_register_intr(irq, cfg, trigger); |
1378 | if (irq < legacy_pic->nr_legacy_irqs) | 1366 | if (irq < legacy_pic->nr_legacy_irqs) |
1379 | legacy_pic->mask(irq); | 1367 | legacy_pic->mask(irq); |
1380 | 1368 | ||
@@ -1385,33 +1373,26 @@ static struct { | |||
1385 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); | 1373 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); |
1386 | } mp_ioapic_routing[MAX_IO_APICS]; | 1374 | } mp_ioapic_routing[MAX_IO_APICS]; |
1387 | 1375 | ||
1388 | static void __init setup_IO_APIC_irqs(void) | 1376 | static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) |
1389 | { | 1377 | { |
1390 | int apic_id, pin, idx, irq, notcon = 0; | 1378 | if (idx != -1) |
1391 | int node = cpu_to_node(0); | 1379 | return false; |
1392 | struct irq_cfg *cfg; | ||
1393 | 1380 | ||
1394 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1381 | apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", |
1382 | mp_ioapics[apic_id].apicid, pin); | ||
1383 | return true; | ||
1384 | } | ||
1385 | |||
1386 | static void __init __io_apic_setup_irqs(unsigned int apic_id) | ||
1387 | { | ||
1388 | int idx, node = cpu_to_node(0); | ||
1389 | struct io_apic_irq_attr attr; | ||
1390 | unsigned int pin, irq; | ||
1395 | 1391 | ||
1396 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
1397 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { | 1392 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { |
1398 | idx = find_irq_entry(apic_id, pin, mp_INT); | 1393 | idx = find_irq_entry(apic_id, pin, mp_INT); |
1399 | if (idx == -1) { | 1394 | if (io_apic_pin_not_connected(idx, apic_id, pin)) |
1400 | if (!notcon) { | ||
1401 | notcon = 1; | ||
1402 | apic_printk(APIC_VERBOSE, | ||
1403 | KERN_DEBUG " %d-%d", | ||
1404 | mp_ioapics[apic_id].apicid, pin); | ||
1405 | } else | ||
1406 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
1407 | mp_ioapics[apic_id].apicid, pin); | ||
1408 | continue; | 1395 | continue; |
1409 | } | ||
1410 | if (notcon) { | ||
1411 | apic_printk(APIC_VERBOSE, | ||
1412 | " (apicid-pin) not connected\n"); | ||
1413 | notcon = 0; | ||
1414 | } | ||
1415 | 1396 | ||
1416 | irq = pin_2_irq(idx, apic_id, pin); | 1397 | irq = pin_2_irq(idx, apic_id, pin); |
1417 | 1398 | ||
@@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void) | |||
1423 | * installed and if it returns 1: | 1404 | * installed and if it returns 1: |
1424 | */ | 1405 | */ |
1425 | if (apic->multi_timer_check && | 1406 | if (apic->multi_timer_check && |
1426 | apic->multi_timer_check(apic_id, irq)) | 1407 | apic->multi_timer_check(apic_id, irq)) |
1427 | continue; | 1408 | continue; |
1428 | 1409 | ||
1429 | cfg = alloc_irq_and_cfg_at(irq, node); | 1410 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
1430 | if (!cfg) | 1411 | irq_polarity(idx)); |
1431 | continue; | ||
1432 | 1412 | ||
1433 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | io_apic_setup_irq_pin(irq, node, &attr); |
1434 | /* | ||
1435 | * don't mark it in pin_programmed, so later acpi could | ||
1436 | * set it correctly when irq < 16 | ||
1437 | */ | ||
1438 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), | ||
1439 | irq_polarity(idx)); | ||
1440 | } | 1414 | } |
1415 | } | ||
1441 | 1416 | ||
1442 | if (notcon) | 1417 | static void __init setup_IO_APIC_irqs(void) |
1443 | apic_printk(APIC_VERBOSE, | 1418 | { |
1444 | " (apicid-pin) not connected\n"); | 1419 | unsigned int apic_id; |
1420 | |||
1421 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | ||
1422 | |||
1423 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
1424 | __io_apic_setup_irqs(apic_id); | ||
1445 | } | 1425 | } |
1446 | 1426 | ||
1447 | /* | 1427 | /* |
@@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1452 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
1453 | { | 1433 | { |
1454 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
1455 | struct irq_cfg *cfg; | 1435 | struct io_apic_irq_attr attr; |
1456 | 1436 | ||
1457 | /* | 1437 | /* |
1458 | * Convert 'gsi' to 'ioapic.pin'. | 1438 | * Convert 'gsi' to 'ioapic.pin'. |
@@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1472 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
1473 | return; | 1453 | return; |
1474 | 1454 | ||
1475 | cfg = alloc_irq_and_cfg_at(irq, node); | 1455 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
1476 | if (!cfg) | 1456 | irq_polarity(idx)); |
1477 | return; | ||
1478 | |||
1479 | add_pin_to_irq_node(cfg, node, apic_id, pin); | ||
1480 | |||
1481 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | ||
1482 | pr_debug("Pin %d-%d already programmed\n", | ||
1483 | mp_ioapics[apic_id].apicid, pin); | ||
1484 | return; | ||
1485 | } | ||
1486 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | ||
1487 | 1457 | ||
1488 | setup_ioapic_irq(apic_id, pin, irq, cfg, | 1458 | io_apic_setup_irq_pin_once(irq, node, &attr); |
1489 | irq_trigger(idx), irq_polarity(idx)); | ||
1490 | } | 1459 | } |
1491 | 1460 | ||
1492 | /* | 1461 | /* |
@@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, | |||
1518 | * The timer IRQ doesn't have to know that behind the | 1487 | * The timer IRQ doesn't have to know that behind the |
1519 | * scene we may have a 8259A-master in AEOI mode ... | 1488 | * scene we may have a 8259A-master in AEOI mode ... |
1520 | */ | 1489 | */ |
1521 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 1490 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
1491 | "edge"); | ||
1522 | 1492 | ||
1523 | /* | 1493 | /* |
1524 | * Add it to the IO-APIC irq-routing table: | 1494 | * Add it to the IO-APIC irq-routing table: |
@@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1625 | for_each_active_irq(irq) { | 1595 | for_each_active_irq(irq) { |
1626 | struct irq_pin_list *entry; | 1596 | struct irq_pin_list *entry; |
1627 | 1597 | ||
1628 | cfg = get_irq_chip_data(irq); | 1598 | cfg = irq_get_chip_data(irq); |
1629 | if (!cfg) | 1599 | if (!cfg) |
1630 | continue; | 1600 | continue; |
1631 | entry = cfg->irq_2_pin; | 1601 | entry = cfg->irq_2_pin; |
@@ -1916,7 +1886,7 @@ void disable_IO_APIC(void) | |||
1916 | * | 1886 | * |
1917 | * With interrupt-remapping, for now we will use virtual wire A mode, | 1887 | * With interrupt-remapping, for now we will use virtual wire A mode, |
1918 | * as virtual wire B is little complex (need to configure both | 1888 | * as virtual wire B is little complex (need to configure both |
1919 | * IOAPIC RTE aswell as interrupt-remapping table entry). | 1889 | * IOAPIC RTE as well as interrupt-remapping table entry). |
1920 | * As this gets called during crash dump, keep this simple for now. | 1890 | * As this gets called during crash dump, keep this simple for now. |
1921 | */ | 1891 | */ |
1922 | if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) { | 1892 | if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) { |
@@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg) | |||
2391 | 2361 | ||
2392 | void irq_force_complete_move(int irq) | 2362 | void irq_force_complete_move(int irq) |
2393 | { | 2363 | { |
2394 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 2364 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
2395 | 2365 | ||
2396 | if (!cfg) | 2366 | if (!cfg) |
2397 | return; | 2367 | return; |
@@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { } | |||
2405 | static void ack_apic_edge(struct irq_data *data) | 2375 | static void ack_apic_edge(struct irq_data *data) |
2406 | { | 2376 | { |
2407 | irq_complete_move(data->chip_data); | 2377 | irq_complete_move(data->chip_data); |
2408 | move_native_irq(data->irq); | 2378 | irq_move_irq(data); |
2409 | ack_APIC_irq(); | 2379 | ack_APIC_irq(); |
2410 | } | 2380 | } |
2411 | 2381 | ||
@@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data) | |||
2462 | irq_complete_move(cfg); | 2432 | irq_complete_move(cfg); |
2463 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2433 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
2464 | /* If we are moving the irq we need to mask it */ | 2434 | /* If we are moving the irq we need to mask it */ |
2465 | if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { | 2435 | if (unlikely(irqd_is_setaffinity_pending(data))) { |
2466 | do_unmask_irq = 1; | 2436 | do_unmask_irq = 1; |
2467 | mask_ioapic(cfg); | 2437 | mask_ioapic(cfg); |
2468 | } | 2438 | } |
@@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data) | |||
2551 | * and you can go talk to the chipset vendor about it. | 2521 | * and you can go talk to the chipset vendor about it. |
2552 | */ | 2522 | */ |
2553 | if (!io_apic_level_ack_pending(cfg)) | 2523 | if (!io_apic_level_ack_pending(cfg)) |
2554 | move_masked_irq(irq); | 2524 | irq_move_masked_irq(data); |
2555 | unmask_ioapic(cfg); | 2525 | unmask_ioapic(cfg); |
2556 | } | 2526 | } |
2557 | } | 2527 | } |
@@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void) | |||
2614 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2584 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2615 | */ | 2585 | */ |
2616 | for_each_active_irq(irq) { | 2586 | for_each_active_irq(irq) { |
2617 | cfg = get_irq_chip_data(irq); | 2587 | cfg = irq_get_chip_data(irq); |
2618 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2588 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
2619 | /* | 2589 | /* |
2620 | * Hmm.. We don't have an entry for this, | 2590 | * Hmm.. We don't have an entry for this, |
@@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void) | |||
2625 | legacy_pic->make_irq(irq); | 2595 | legacy_pic->make_irq(irq); |
2626 | else | 2596 | else |
2627 | /* Strange. Oh, well.. */ | 2597 | /* Strange. Oh, well.. */ |
2628 | set_irq_chip(irq, &no_irq_chip); | 2598 | irq_set_chip(irq, &no_irq_chip); |
2629 | } | 2599 | } |
2630 | } | 2600 | } |
2631 | } | 2601 | } |
@@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = { | |||
2665 | static void lapic_register_intr(int irq) | 2635 | static void lapic_register_intr(int irq) |
2666 | { | 2636 | { |
2667 | irq_clear_status_flags(irq, IRQ_LEVEL); | 2637 | irq_clear_status_flags(irq, IRQ_LEVEL); |
2668 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2638 | irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
2669 | "edge"); | 2639 | "edge"); |
2670 | } | 2640 | } |
2671 | 2641 | ||
@@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata; | |||
2749 | */ | 2719 | */ |
2750 | static inline void __init check_timer(void) | 2720 | static inline void __init check_timer(void) |
2751 | { | 2721 | { |
2752 | struct irq_cfg *cfg = get_irq_chip_data(0); | 2722 | struct irq_cfg *cfg = irq_get_chip_data(0); |
2753 | int node = cpu_to_node(0); | 2723 | int node = cpu_to_node(0); |
2754 | int apic1, pin1, apic2, pin2; | 2724 | int apic1, pin1, apic2, pin2; |
2755 | unsigned long flags; | 2725 | unsigned long flags; |
@@ -2935,7 +2905,7 @@ void __init setup_IO_APIC(void) | |||
2935 | } | 2905 | } |
2936 | 2906 | ||
2937 | /* | 2907 | /* |
2938 | * Called after all the initialization is done. If we didnt find any | 2908 | * Called after all the initialization is done. If we didn't find any |
2939 | * APIC bugs then we can allow the modify fast path | 2909 | * APIC bugs then we can allow the modify fast path |
2940 | */ | 2910 | */ |
2941 | 2911 | ||
@@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node) | |||
3060 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3030 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3061 | 3031 | ||
3062 | if (ret) { | 3032 | if (ret) { |
3063 | set_irq_chip_data(irq, cfg); | 3033 | irq_set_chip_data(irq, cfg); |
3064 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | 3034 | irq_clear_status_flags(irq, IRQ_NOREQUEST); |
3065 | } else { | 3035 | } else { |
3066 | free_irq_at(irq, cfg); | 3036 | free_irq_at(irq, cfg); |
@@ -3085,7 +3055,7 @@ int create_irq(void) | |||
3085 | 3055 | ||
3086 | void destroy_irq(unsigned int irq) | 3056 | void destroy_irq(unsigned int irq) |
3087 | { | 3057 | { |
3088 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 3058 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
3089 | unsigned long flags; | 3059 | unsigned long flags; |
3090 | 3060 | ||
3091 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); | 3061 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
@@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3119 | 3089 | ||
3120 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3090 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
3121 | 3091 | ||
3122 | if (irq_remapped(get_irq_chip_data(irq))) { | 3092 | if (irq_remapped(cfg)) { |
3123 | struct irte irte; | 3093 | struct irte irte; |
3124 | int ir_index; | 3094 | int ir_index; |
3125 | u16 sub_handle; | 3095 | u16 sub_handle; |
@@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
3291 | 3261 | ||
3292 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3262 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
3293 | { | 3263 | { |
3264 | struct irq_chip *chip = &msi_chip; | ||
3294 | struct msi_msg msg; | 3265 | struct msi_msg msg; |
3295 | int ret; | 3266 | int ret; |
3296 | 3267 | ||
@@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3298 | if (ret < 0) | 3269 | if (ret < 0) |
3299 | return ret; | 3270 | return ret; |
3300 | 3271 | ||
3301 | set_irq_msi(irq, msidesc); | 3272 | irq_set_msi_desc(irq, msidesc); |
3302 | write_msi_msg(irq, &msg); | 3273 | write_msi_msg(irq, &msg); |
3303 | 3274 | ||
3304 | if (irq_remapped(get_irq_chip_data(irq))) { | 3275 | if (irq_remapped(irq_get_chip_data(irq))) { |
3305 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3276 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3306 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3277 | chip = &msi_ir_chip; |
3307 | } else | 3278 | } |
3308 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3279 | |
3280 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3309 | 3281 | ||
3310 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); | 3282 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); |
3311 | 3283 | ||
@@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3423 | if (ret < 0) | 3395 | if (ret < 0) |
3424 | return ret; | 3396 | return ret; |
3425 | dmar_msi_write(irq, &msg); | 3397 | dmar_msi_write(irq, &msg); |
3426 | set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | 3398 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, |
3427 | "edge"); | 3399 | "edge"); |
3428 | return 0; | 3400 | return 0; |
3429 | } | 3401 | } |
3430 | #endif | 3402 | #endif |
@@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = { | |||
3482 | 3454 | ||
3483 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3455 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
3484 | { | 3456 | { |
3457 | struct irq_chip *chip = &hpet_msi_type; | ||
3485 | struct msi_msg msg; | 3458 | struct msi_msg msg; |
3486 | int ret; | 3459 | int ret; |
3487 | 3460 | ||
@@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
3501 | if (ret < 0) | 3474 | if (ret < 0) |
3502 | return ret; | 3475 | return ret; |
3503 | 3476 | ||
3504 | hpet_msi_write(get_irq_data(irq), &msg); | 3477 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
3505 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3478 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3506 | if (irq_remapped(get_irq_chip_data(irq))) | 3479 | if (irq_remapped(irq_get_chip_data(irq))) |
3507 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3480 | chip = &ir_hpet_msi_type; |
3508 | handle_edge_irq, "edge"); | ||
3509 | else | ||
3510 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, | ||
3511 | handle_edge_irq, "edge"); | ||
3512 | 3481 | ||
3482 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3513 | return 0; | 3483 | return 0; |
3514 | } | 3484 | } |
3515 | #endif | 3485 | #endif |
@@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3596 | 3566 | ||
3597 | write_ht_irq_msg(irq, &msg); | 3567 | write_ht_irq_msg(irq, &msg); |
3598 | 3568 | ||
3599 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, | 3569 | irq_set_chip_and_handler_name(irq, &ht_irq_chip, |
3600 | handle_edge_irq, "edge"); | 3570 | handle_edge_irq, "edge"); |
3601 | 3571 | ||
3602 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); | 3572 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); |
@@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3605 | } | 3575 | } |
3606 | #endif /* CONFIG_HT_IRQ */ | 3576 | #endif /* CONFIG_HT_IRQ */ |
3607 | 3577 | ||
3608 | int __init io_apic_get_redir_entries (int ioapic) | 3578 | int |
3579 | io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) | ||
3580 | { | ||
3581 | struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); | ||
3582 | int ret; | ||
3583 | |||
3584 | if (!cfg) | ||
3585 | return -EINVAL; | ||
3586 | ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); | ||
3587 | if (!ret) | ||
3588 | setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, | ||
3589 | attr->trigger, attr->polarity); | ||
3590 | return ret; | ||
3591 | } | ||
3592 | |||
3593 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
3594 | struct io_apic_irq_attr *attr) | ||
3595 | { | ||
3596 | unsigned int id = attr->ioapic, pin = attr->ioapic_pin; | ||
3597 | int ret; | ||
3598 | |||
3599 | /* Avoid redundant programming */ | ||
3600 | if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) { | ||
3601 | pr_debug("Pin %d-%d already programmed\n", | ||
3602 | mp_ioapics[id].apicid, pin); | ||
3603 | return 0; | ||
3604 | } | ||
3605 | ret = io_apic_setup_irq_pin(irq, node, attr); | ||
3606 | if (!ret) | ||
3607 | set_bit(pin, mp_ioapic_routing[id].pin_programmed); | ||
3608 | return ret; | ||
3609 | } | ||
3610 | |||
3611 | static int __init io_apic_get_redir_entries(int ioapic) | ||
3609 | { | 3612 | { |
3610 | union IO_APIC_reg_01 reg_01; | 3613 | union IO_APIC_reg_01 reg_01; |
3611 | unsigned long flags; | 3614 | unsigned long flags; |
@@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void) | |||
3659 | } | 3662 | } |
3660 | #endif | 3663 | #endif |
3661 | 3664 | ||
3662 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3665 | int io_apic_set_pci_routing(struct device *dev, int irq, |
3663 | struct io_apic_irq_attr *irq_attr) | 3666 | struct io_apic_irq_attr *irq_attr) |
3664 | { | 3667 | { |
3665 | struct irq_cfg *cfg; | ||
3666 | int node; | 3668 | int node; |
3667 | int ioapic, pin; | ||
3668 | int trigger, polarity; | ||
3669 | 3669 | ||
3670 | ioapic = irq_attr->ioapic; | ||
3671 | if (!IO_APIC_IRQ(irq)) { | 3670 | if (!IO_APIC_IRQ(irq)) { |
3672 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | 3671 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", |
3673 | ioapic); | 3672 | irq_attr->ioapic); |
3674 | return -EINVAL; | 3673 | return -EINVAL; |
3675 | } | 3674 | } |
3676 | 3675 | ||
3677 | if (dev) | 3676 | node = dev ? dev_to_node(dev) : cpu_to_node(0); |
3678 | node = dev_to_node(dev); | ||
3679 | else | ||
3680 | node = cpu_to_node(0); | ||
3681 | |||
3682 | cfg = alloc_irq_and_cfg_at(irq, node); | ||
3683 | if (!cfg) | ||
3684 | return 0; | ||
3685 | |||
3686 | pin = irq_attr->ioapic_pin; | ||
3687 | trigger = irq_attr->trigger; | ||
3688 | polarity = irq_attr->polarity; | ||
3689 | 3677 | ||
3690 | /* | 3678 | return io_apic_setup_irq_pin_once(irq, node, irq_attr); |
3691 | * IRQs < 16 are already in the irq_2_pin[] map | ||
3692 | */ | ||
3693 | if (irq >= legacy_pic->nr_legacy_irqs) { | ||
3694 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { | ||
3695 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
3696 | pin, irq); | ||
3697 | return 0; | ||
3698 | } | ||
3699 | } | ||
3700 | |||
3701 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); | ||
3702 | |||
3703 | return 0; | ||
3704 | } | 3679 | } |
3705 | 3680 | ||
3706 | int io_apic_set_pci_routing(struct device *dev, int irq, | ||
3707 | struct io_apic_irq_attr *irq_attr) | ||
3708 | { | ||
3709 | int ioapic, pin; | ||
3710 | /* | ||
3711 | * Avoid pin reprogramming. PRTs typically include entries | ||
3712 | * with redundant pin->gsi mappings (but unique PCI devices); | ||
3713 | * we only program the IOAPIC on the first. | ||
3714 | */ | ||
3715 | ioapic = irq_attr->ioapic; | ||
3716 | pin = irq_attr->ioapic_pin; | ||
3717 | if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { | ||
3718 | pr_debug("Pin %d-%d already programmed\n", | ||
3719 | mp_ioapics[ioapic].apicid, pin); | ||
3720 | return 0; | ||
3721 | } | ||
3722 | set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); | ||
3723 | |||
3724 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | ||
3725 | } | ||
3726 | |||
3727 | u8 __init io_apic_unique_id(u8 id) | ||
3728 | { | ||
3729 | #ifdef CONFIG_X86_32 | 3681 | #ifdef CONFIG_X86_32 |
3730 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | 3682 | static int __init io_apic_get_unique_id(int ioapic, int apic_id) |
3731 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3732 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3733 | else | ||
3734 | return id; | ||
3735 | #else | ||
3736 | int i; | ||
3737 | DECLARE_BITMAP(used, 256); | ||
3738 | |||
3739 | bitmap_zero(used, 256); | ||
3740 | for (i = 0; i < nr_ioapics; i++) { | ||
3741 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3742 | __set_bit(ia->apicid, used); | ||
3743 | } | ||
3744 | if (!test_bit(id, used)) | ||
3745 | return id; | ||
3746 | return find_first_zero_bit(used, 256); | ||
3747 | #endif | ||
3748 | } | ||
3749 | |||
3750 | #ifdef CONFIG_X86_32 | ||
3751 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | ||
3752 | { | 3683 | { |
3753 | union IO_APIC_reg_00 reg_00; | 3684 | union IO_APIC_reg_00 reg_00; |
3754 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | 3685 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; |
@@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3821 | 3752 | ||
3822 | return apic_id; | 3753 | return apic_id; |
3823 | } | 3754 | } |
3755 | |||
3756 | static u8 __init io_apic_unique_id(u8 id) | ||
3757 | { | ||
3758 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3759 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3760 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3761 | else | ||
3762 | return id; | ||
3763 | } | ||
3764 | #else | ||
3765 | static u8 __init io_apic_unique_id(u8 id) | ||
3766 | { | ||
3767 | int i; | ||
3768 | DECLARE_BITMAP(used, 256); | ||
3769 | |||
3770 | bitmap_zero(used, 256); | ||
3771 | for (i = 0; i < nr_ioapics; i++) { | ||
3772 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3773 | __set_bit(ia->apicid, used); | ||
3774 | } | ||
3775 | if (!test_bit(id, used)) | ||
3776 | return id; | ||
3777 | return find_first_zero_bit(used, 256); | ||
3778 | } | ||
3824 | #endif | 3779 | #endif |
3825 | 3780 | ||
3826 | int __init io_apic_get_version(int ioapic) | 3781 | static int __init io_apic_get_version(int ioapic) |
3827 | { | 3782 | { |
3828 | union IO_APIC_reg_01 reg_01; | 3783 | union IO_APIC_reg_01 reg_01; |
3829 | unsigned long flags; | 3784 | unsigned long flags; |
@@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) | |||
3868 | void __init setup_ioapic_dest(void) | 3823 | void __init setup_ioapic_dest(void) |
3869 | { | 3824 | { |
3870 | int pin, ioapic, irq, irq_entry; | 3825 | int pin, ioapic, irq, irq_entry; |
3871 | struct irq_desc *desc; | ||
3872 | const struct cpumask *mask; | 3826 | const struct cpumask *mask; |
3827 | struct irq_data *idata; | ||
3873 | 3828 | ||
3874 | if (skip_ioapic_setup == 1) | 3829 | if (skip_ioapic_setup == 1) |
3875 | return; | 3830 | return; |
@@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void) | |||
3884 | if ((ioapic > 0) && (irq > 16)) | 3839 | if ((ioapic > 0) && (irq > 16)) |
3885 | continue; | 3840 | continue; |
3886 | 3841 | ||
3887 | desc = irq_to_desc(irq); | 3842 | idata = irq_get_irq_data(irq); |
3888 | 3843 | ||
3889 | /* | 3844 | /* |
3890 | * Honour affinities which have been set in early boot | 3845 | * Honour affinities which have been set in early boot |
3891 | */ | 3846 | */ |
3892 | if (desc->status & | 3847 | if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) |
3893 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3848 | mask = idata->affinity; |
3894 | mask = desc->irq_data.affinity; | ||
3895 | else | 3849 | else |
3896 | mask = apic->target_cpus(); | 3850 | mask = apic->target_cpus(); |
3897 | 3851 | ||
3898 | if (intr_remapping_enabled) | 3852 | if (intr_remapping_enabled) |
3899 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); | 3853 | ir_ioapic_set_affinity(idata, mask, false); |
3900 | else | 3854 | else |
3901 | ioapic_set_affinity(&desc->irq_data, mask, false); | 3855 | ioapic_set_affinity(idata, mask, false); |
3902 | } | 3856 | } |
3903 | 3857 | ||
3904 | } | 3858 | } |
@@ -4026,10 +3980,10 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) | |||
4026 | return gsi - mp_gsi_routing[ioapic].gsi_base; | 3980 | return gsi - mp_gsi_routing[ioapic].gsi_base; |
4027 | } | 3981 | } |
4028 | 3982 | ||
4029 | static int bad_ioapic(unsigned long address) | 3983 | static __init int bad_ioapic(unsigned long address) |
4030 | { | 3984 | { |
4031 | if (nr_ioapics >= MAX_IO_APICS) { | 3985 | if (nr_ioapics >= MAX_IO_APICS) { |
4032 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " | 3986 | printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " |
4033 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); | 3987 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); |
4034 | return 1; | 3988 | return 1; |
4035 | } | 3989 | } |
@@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4086 | /* Enable IOAPIC early just for system timer */ | 4040 | /* Enable IOAPIC early just for system timer */ |
4087 | void __init pre_init_apic_IRQ0(void) | 4041 | void __init pre_init_apic_IRQ0(void) |
4088 | { | 4042 | { |
4089 | struct irq_cfg *cfg; | 4043 | struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; |
4090 | 4044 | ||
4091 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4045 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
4092 | #ifndef CONFIG_SMP | 4046 | #ifndef CONFIG_SMP |
4093 | physid_set_mask_of_physid(boot_cpu_physical_apicid, | 4047 | physid_set_mask_of_physid(boot_cpu_physical_apicid, |
4094 | &phys_cpu_present_map); | 4048 | &phys_cpu_present_map); |
4095 | #endif | 4049 | #endif |
4096 | /* Make sure the irq descriptor is set up */ | ||
4097 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
4098 | |||
4099 | setup_local_APIC(); | 4050 | setup_local_APIC(); |
4100 | 4051 | ||
4101 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4052 | io_apic_setup_irq_pin(0, 0, &attr); |
4102 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4053 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
4103 | 4054 | "edge"); | |
4104 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); | ||
4105 | } | 4055 | } |
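Two patterns run through the io_apic.c hunks above: the irq chip accessors move to the irq_get_*/irq_set_* names (irq_get_chip_data, irq_set_chip_data, irq_set_chip_and_handler_name), and per-pin programming is funnelled through io_apic_setup_irq_pin_once() so a pin that shows up in several PRT entries is only routed the first time. A hedged sketch of that once-only helper, with the surrounding declarations (mp_ioapic_routing, io_apic_setup_irq_pin, struct io_apic_irq_attr) assumed from the diff rather than reproduced:

static int sketch_setup_irq_pin_once(unsigned int irq, int node,
				     struct io_apic_irq_attr *attr)
{
	unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
	int ret;

	/* Pin already routed by an earlier caller: nothing to do. */
	if (test_bit(pin, mp_ioapic_routing[id].pin_programmed))
		return 0;

	ret = io_apic_setup_irq_pin(irq, node, attr);
	if (!ret)
		/* Remember the pin only when programming succeeded. */
		set_bit(pin, mp_ioapic_routing[id].pin_programmed);
	return ret;
}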
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 08385e090a6f..cce91bf26676 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, | |||
56 | local_irq_restore(flags); | 56 | local_irq_restore(flags); |
57 | } | 57 | } |
58 | 58 | ||
59 | #ifdef CONFIG_X86_32 | ||
60 | |||
59 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | 61 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, |
60 | int vector) | 62 | int vector) |
61 | { | 63 | { |
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | |||
71 | local_irq_save(flags); | 73 | local_irq_save(flags); |
72 | for_each_cpu(query_cpu, mask) | 74 | for_each_cpu(query_cpu, mask) |
73 | __default_send_IPI_dest_field( | 75 | __default_send_IPI_dest_field( |
74 | apic->cpu_to_logical_apicid(query_cpu), vector, | 76 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
75 | apic->dest_logical); | 77 | vector, apic->dest_logical); |
76 | local_irq_restore(flags); | 78 | local_irq_restore(flags); |
77 | } | 79 | } |
78 | 80 | ||
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | |||
90 | if (query_cpu == this_cpu) | 92 | if (query_cpu == this_cpu) |
91 | continue; | 93 | continue; |
92 | __default_send_IPI_dest_field( | 94 | __default_send_IPI_dest_field( |
93 | apic->cpu_to_logical_apicid(query_cpu), vector, | 95 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
94 | apic->dest_logical); | 96 | vector, apic->dest_logical); |
95 | } | 97 | } |
96 | local_irq_restore(flags); | 98 | local_irq_restore(flags); |
97 | } | 99 | } |
98 | 100 | ||
99 | #ifdef CONFIG_X86_32 | ||
100 | |||
101 | /* | 101 | /* |
102 | * This is only used on smaller machines. | 102 | * This is only used on smaller machines. |
103 | */ | 103 | */ |
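In the ipi.c hunks above the per-apic cpu_to_logical_apicid() callback disappears from the logical-mode send paths; the logical APIC ID of each destination is read from the x86_cpu_to_logical_apicid early per-cpu variable instead. A sketch of the resulting loop, assuming the helpers named in the hunk (__default_send_IPI_dest_field, early_per_cpu) and the global apic pointer:

static void sketch_send_IPI_mask_logical(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	local_irq_save(flags);		/* dest-field writes must not be interrupted */
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}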
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 960f26ab5c9f..6273eee5134b 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask | |||
373 | return physids_promote(0xFUL, retmap); | 373 | return physids_promote(0xFUL, retmap); |
374 | } | 374 | } |
375 | 375 | ||
376 | static inline int numaq_cpu_to_logical_apicid(int cpu) | ||
377 | { | ||
378 | if (cpu >= nr_cpu_ids) | ||
379 | return BAD_APICID; | ||
380 | return cpu_2_logical_apicid[cpu]; | ||
381 | } | ||
382 | |||
383 | /* | 376 | /* |
384 | * Supporting over 60 cpus on NUMA-Q requires a locality-dependent | 377 | * Supporting over 60 cpus on NUMA-Q requires a locality-dependent |
385 | * cpu to APIC ID relation to properly interact with the intelligent | 378 | * cpu to APIC ID relation to properly interact with the intelligent |
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid) | |||
398 | return logical_apicid >> 4; | 391 | return logical_apicid >> 4; |
399 | } | 392 | } |
400 | 393 | ||
394 | static int numaq_numa_cpu_node(int cpu) | ||
395 | { | ||
396 | int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
397 | |||
398 | if (logical_apicid != BAD_APICID) | ||
399 | return numaq_apicid_to_node(logical_apicid); | ||
400 | return NUMA_NO_NODE; | ||
401 | } | ||
402 | |||
401 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) | 403 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) |
402 | { | 404 | { |
403 | int node = numaq_apicid_to_node(logical_apicid); | 405 | int node = numaq_apicid_to_node(logical_apicid); |
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = { | |||
508 | .ioapic_phys_id_map = numaq_ioapic_phys_id_map, | 510 | .ioapic_phys_id_map = numaq_ioapic_phys_id_map, |
509 | .setup_apic_routing = numaq_setup_apic_routing, | 511 | .setup_apic_routing = numaq_setup_apic_routing, |
510 | .multi_timer_check = numaq_multi_timer_check, | 512 | .multi_timer_check = numaq_multi_timer_check, |
511 | .apicid_to_node = numaq_apicid_to_node, | ||
512 | .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, | ||
513 | .cpu_present_to_apicid = numaq_cpu_present_to_apicid, | 513 | .cpu_present_to_apicid = numaq_cpu_present_to_apicid, |
514 | .apicid_to_cpu_present = numaq_apicid_to_cpu_present, | 514 | .apicid_to_cpu_present = numaq_apicid_to_cpu_present, |
515 | .setup_portio_remap = numaq_setup_portio_remap, | 515 | .setup_portio_remap = numaq_setup_portio_remap, |
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = { | |||
547 | .icr_write = native_apic_icr_write, | 547 | .icr_write = native_apic_icr_write, |
548 | .wait_icr_idle = native_apic_wait_icr_idle, | 548 | .wait_icr_idle = native_apic_wait_icr_idle, |
549 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 549 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
550 | |||
551 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, | ||
552 | .x86_32_numa_cpu_node = numaq_numa_cpu_node, | ||
550 | }; | 553 | }; |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 99d2fe016084..fc84c7b61108 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void) | |||
77 | apic->setup_apic_routing(); | 77 | apic->setup_apic_routing(); |
78 | } | 78 | } |
79 | 79 | ||
80 | static int default_x86_32_early_logical_apicid(int cpu) | ||
81 | { | ||
82 | return 1 << cpu; | ||
83 | } | ||
84 | |||
80 | static void setup_apic_flat_routing(void) | 85 | static void setup_apic_flat_routing(void) |
81 | { | 86 | { |
82 | #ifdef CONFIG_X86_IO_APIC | 87 | #ifdef CONFIG_X86_IO_APIC |
@@ -130,8 +135,6 @@ struct apic apic_default = { | |||
130 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 135 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
131 | .setup_apic_routing = setup_apic_flat_routing, | 136 | .setup_apic_routing = setup_apic_flat_routing, |
132 | .multi_timer_check = NULL, | 137 | .multi_timer_check = NULL, |
133 | .apicid_to_node = default_apicid_to_node, | ||
134 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | ||
135 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 138 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
136 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 139 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
137 | .setup_portio_remap = NULL, | 140 | .setup_portio_remap = NULL, |
@@ -167,6 +170,9 @@ struct apic apic_default = { | |||
167 | .icr_write = native_apic_icr_write, | 170 | .icr_write = native_apic_icr_write, |
168 | .wait_icr_idle = native_apic_wait_icr_idle, | 171 | .wait_icr_idle = native_apic_wait_icr_idle, |
169 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 172 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
173 | |||
174 | .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, | ||
175 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
170 | }; | 176 | }; |
171 | 177 | ||
172 | extern struct apic apic_numaq; | 178 | extern struct apic apic_numaq; |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 9b419263d90d..e4b8059b414a 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit) | |||
194 | return 1; | 194 | return 1; |
195 | } | 195 | } |
196 | 196 | ||
197 | static void summit_init_apic_ldr(void) | 197 | static int summit_early_logical_apicid(int cpu) |
198 | { | 198 | { |
199 | unsigned long val, id; | ||
200 | int count = 0; | 199 | int count = 0; |
201 | u8 my_id = (u8)hard_smp_processor_id(); | 200 | u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); |
202 | u8 my_cluster = APIC_CLUSTER(my_id); | 201 | u8 my_cluster = APIC_CLUSTER(my_id); |
203 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
204 | u8 lid; | 203 | u8 lid; |
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void) | |||
206 | 205 | ||
207 | /* Create logical APIC IDs by counting CPUs already in cluster. */ | 206 | /* Create logical APIC IDs by counting CPUs already in cluster. */ |
208 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { | 207 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { |
209 | lid = cpu_2_logical_apicid[i]; | 208 | lid = early_per_cpu(x86_cpu_to_logical_apicid, i); |
210 | if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) | 209 | if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) |
211 | ++count; | 210 | ++count; |
212 | } | 211 | } |
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void) | |||
214 | /* We only have a 4 wide bitmap in cluster mode. If a deranged | 213 | /* We only have a 4 wide bitmap in cluster mode. If a deranged |
215 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ | 214 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ |
216 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); | 215 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); |
217 | id = my_cluster | (1UL << count); | 216 | return my_cluster | (1UL << count); |
217 | } | ||
218 | |||
219 | static void summit_init_apic_ldr(void) | ||
220 | { | ||
221 | int cpu = smp_processor_id(); | ||
222 | unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
223 | unsigned long val; | ||
224 | |||
218 | apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); | 225 | apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); |
219 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | 226 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
220 | val |= SET_APIC_LOGICAL_ID(id); | 227 | val |= SET_APIC_LOGICAL_ID(id); |
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void) | |||
232 | nr_ioapics); | 239 | nr_ioapics); |
233 | } | 240 | } |
234 | 241 | ||
235 | static int summit_apicid_to_node(int logical_apicid) | ||
236 | { | ||
237 | #ifdef CONFIG_SMP | ||
238 | return apicid_2_node[hard_smp_processor_id()]; | ||
239 | #else | ||
240 | return 0; | ||
241 | #endif | ||
242 | } | ||
243 | |||
244 | /* Mapping from cpu number to logical apicid */ | ||
245 | static inline int summit_cpu_to_logical_apicid(int cpu) | ||
246 | { | ||
247 | #ifdef CONFIG_SMP | ||
248 | if (cpu >= nr_cpu_ids) | ||
249 | return BAD_APICID; | ||
250 | return cpu_2_logical_apicid[cpu]; | ||
251 | #else | ||
252 | return logical_smp_processor_id(); | ||
253 | #endif | ||
254 | } | ||
255 | |||
256 | static int summit_cpu_present_to_apicid(int mps_cpu) | 242 | static int summit_cpu_present_to_apicid(int mps_cpu) |
257 | { | 243 | { |
258 | if (mps_cpu < nr_cpu_ids) | 244 | if (mps_cpu < nr_cpu_ids) |
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
286 | * The cpus in the mask must all be on the apic cluster. | 272 | * The cpus in the mask must all be on the apic cluster. |
287 | */ | 273 | */ |
288 | for_each_cpu(cpu, cpumask) { | 274 | for_each_cpu(cpu, cpumask) { |
289 | int new_apicid = summit_cpu_to_logical_apicid(cpu); | 275 | int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); |
290 | 276 | ||
291 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | 277 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
292 | printk("%s: Not a valid mask!\n", __func__); | 278 | printk("%s: Not a valid mask!\n", __func__); |
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
301 | static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, | 287 | static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, |
302 | const struct cpumask *andmask) | 288 | const struct cpumask *andmask) |
303 | { | 289 | { |
304 | int apicid = summit_cpu_to_logical_apicid(0); | 290 | int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); |
305 | cpumask_var_t cpumask; | 291 | cpumask_var_t cpumask; |
306 | 292 | ||
307 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | 293 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) |
@@ -528,8 +514,6 @@ struct apic apic_summit = { | |||
528 | .ioapic_phys_id_map = summit_ioapic_phys_id_map, | 514 | .ioapic_phys_id_map = summit_ioapic_phys_id_map, |
529 | .setup_apic_routing = summit_setup_apic_routing, | 515 | .setup_apic_routing = summit_setup_apic_routing, |
530 | .multi_timer_check = NULL, | 516 | .multi_timer_check = NULL, |
531 | .apicid_to_node = summit_apicid_to_node, | ||
532 | .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, | ||
533 | .cpu_present_to_apicid = summit_cpu_present_to_apicid, | 517 | .cpu_present_to_apicid = summit_cpu_present_to_apicid, |
534 | .apicid_to_cpu_present = summit_apicid_to_cpu_present, | 518 | .apicid_to_cpu_present = summit_apicid_to_cpu_present, |
535 | .setup_portio_remap = NULL, | 519 | .setup_portio_remap = NULL, |
@@ -565,4 +549,7 @@ struct apic apic_summit = { | |||
565 | .icr_write = native_apic_icr_write, | 549 | .icr_write = native_apic_icr_write, |
566 | .wait_icr_idle = native_apic_wait_icr_idle, | 550 | .wait_icr_idle = native_apic_wait_icr_idle, |
567 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 551 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
552 | |||
553 | .x86_32_early_logical_apicid = summit_early_logical_apicid, | ||
554 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
568 | }; | 555 | }; |
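The summit_cpu_mask_to_apicid() hunk above relies on every CPU in the mask sharing one xAPIC cluster before OR-ing their logical IDs into a single destination. Below is a stand-alone sketch of that check, using an illustrative 4-bit cluster / 4-bit member split rather than the kernel's real constants:

/*
 * Stand-alone sketch (not kernel code) of the cluster check performed in
 * summit_cpu_mask_to_apicid() above: every CPU in the mask must live in
 * the same cluster, and their per-cluster bits are OR-ed into one
 * destination ID.  The 0xf0/0x0f split mirrors the xAPIC cluster model;
 * treat the constants as illustrative.
 */
#include <stdio.h>

#define APIC_CLUSTER(id)   ((id) & 0xf0)   /* upper nibble: cluster       */
#define APIC_CPU_BITS(id)  ((id) & 0x0f)   /* lower nibble: CPUs in cluster */
#define BAD_APICID         0xff

static unsigned int mask_to_apicid(const unsigned char *logical_ids, int n)
{
	unsigned int apicid = 0;
	int round = 0, i;

	for (i = 0; i < n; i++) {
		unsigned int new_apicid = logical_ids[i];

		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid))
			return BAD_APICID;      /* CPUs span two clusters */

		apicid |= new_apicid;
		round++;
	}
	return apicid;
}

int main(void)
{
	unsigned char ok[]  = { 0x21, 0x22, 0x24 };  /* all in cluster 0x20      */
	unsigned char bad[] = { 0x21, 0x31 };        /* clusters 0x20 and 0x30   */

	printf("merged: 0x%x\n", mask_to_apicid(ok, 3));   /* 0x27               */
	printf("merged: 0x%x\n", mask_to_apicid(bad, 2));  /* 0xff (BAD_APICID)  */
	return 0;
}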
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index cf69c59f4910..90949bbd566d 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = { | |||
206 | .ioapic_phys_id_map = NULL, | 206 | .ioapic_phys_id_map = NULL, |
207 | .setup_apic_routing = NULL, | 207 | .setup_apic_routing = NULL, |
208 | .multi_timer_check = NULL, | 208 | .multi_timer_check = NULL, |
209 | .apicid_to_node = NULL, | ||
210 | .cpu_to_logical_apicid = NULL, | ||
211 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 209 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
212 | .apicid_to_cpu_present = NULL, | 210 | .apicid_to_cpu_present = NULL, |
213 | .setup_portio_remap = NULL, | 211 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8972f38c5ced..c7e6d6645bf4 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = { | |||
195 | .ioapic_phys_id_map = NULL, | 195 | .ioapic_phys_id_map = NULL, |
196 | .setup_apic_routing = NULL, | 196 | .setup_apic_routing = NULL, |
197 | .multi_timer_check = NULL, | 197 | .multi_timer_check = NULL, |
198 | .apicid_to_node = NULL, | ||
199 | .cpu_to_logical_apicid = NULL, | ||
200 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 198 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
201 | .apicid_to_cpu_present = NULL, | 199 | .apicid_to_cpu_present = NULL, |
202 | .setup_portio_remap = NULL, | 200 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index bd16b58b8850..3c289281394c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = { | |||
338 | .ioapic_phys_id_map = NULL, | 338 | .ioapic_phys_id_map = NULL, |
339 | .setup_apic_routing = NULL, | 339 | .setup_apic_routing = NULL, |
340 | .multi_timer_check = NULL, | 340 | .multi_timer_check = NULL, |
341 | .apicid_to_node = NULL, | ||
342 | .cpu_to_logical_apicid = NULL, | ||
343 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 341 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
344 | .apicid_to_cpu_present = NULL, | 342 | .apicid_to_cpu_present = NULL, |
345 | .setup_portio_remap = NULL, | 343 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 0e4f24c2a746..0b4be431c620 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -66,7 +66,7 @@ | |||
66 | * 1.5: Fix segment register reloading (in case of bad segments saved | 66 | * 1.5: Fix segment register reloading (in case of bad segments saved |
67 | * across BIOS call). | 67 | * across BIOS call). |
68 | * Stephen Rothwell | 68 | * Stephen Rothwell |
69 | * 1.6: Cope with complier/assembler differences. | 69 | * 1.6: Cope with compiler/assembler differences. |
70 | * Only try to turn off the first display device. | 70 | * Only try to turn off the first display device. |
71 | * Fix OOPS at power off with no APM BIOS by Jan Echternach | 71 | * Fix OOPS at power off with no APM BIOS by Jan Echternach |
72 | * <echter@informatik.uni-rostock.de> | 72 | * <echter@informatik.uni-rostock.de> |
@@ -227,6 +227,7 @@ | |||
227 | #include <linux/suspend.h> | 227 | #include <linux/suspend.h> |
228 | #include <linux/kthread.h> | 228 | #include <linux/kthread.h> |
229 | #include <linux/jiffies.h> | 229 | #include <linux/jiffies.h> |
230 | #include <linux/acpi.h> | ||
230 | 231 | ||
231 | #include <asm/system.h> | 232 | #include <asm/system.h> |
232 | #include <asm/uaccess.h> | 233 | #include <asm/uaccess.h> |
@@ -975,20 +976,10 @@ recalc: | |||
975 | 976 | ||
976 | static void apm_power_off(void) | 977 | static void apm_power_off(void) |
977 | { | 978 | { |
978 | unsigned char po_bios_call[] = { | ||
979 | 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ | ||
980 | 0x8e, 0xd0, /* movw ax,ss */ | ||
981 | 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ | ||
982 | 0xb8, 0x07, 0x53, /* movw $0x5307,ax */ | ||
983 | 0xbb, 0x01, 0x00, /* movw $0x0001,bx */ | ||
984 | 0xb9, 0x03, 0x00, /* movw $0x0003,cx */ | ||
985 | 0xcd, 0x15 /* int $0x15 */ | ||
986 | }; | ||
987 | |||
988 | /* Some bioses don't like being called from CPU != 0 */ | 979 | /* Some bioses don't like being called from CPU != 0 */ |
989 | if (apm_info.realmode_power_off) { | 980 | if (apm_info.realmode_power_off) { |
990 | set_cpus_allowed_ptr(current, cpumask_of(0)); | 981 | set_cpus_allowed_ptr(current, cpumask_of(0)); |
991 | machine_real_restart(po_bios_call, sizeof(po_bios_call)); | 982 | machine_real_restart(MRR_APM); |
992 | } else { | 983 | } else { |
993 | (void)set_system_power_state(APM_STATE_OFF); | 984 | (void)set_system_power_state(APM_STATE_OFF); |
994 | } | 985 | } |
@@ -2331,12 +2322,11 @@ static int __init apm_init(void) | |||
2331 | apm_info.disabled = 1; | 2322 | apm_info.disabled = 1; |
2332 | return -ENODEV; | 2323 | return -ENODEV; |
2333 | } | 2324 | } |
2334 | if (pm_flags & PM_ACPI) { | 2325 | if (!acpi_disabled) { |
2335 | printk(KERN_NOTICE "apm: overridden by ACPI.\n"); | 2326 | printk(KERN_NOTICE "apm: overridden by ACPI.\n"); |
2336 | apm_info.disabled = 1; | 2327 | apm_info.disabled = 1; |
2337 | return -ENODEV; | 2328 | return -ENODEV; |
2338 | } | 2329 | } |
2339 | pm_flags |= PM_APM; | ||
2340 | 2330 | ||
2341 | /* | 2331 | /* |
2342 | * Set up the long jump entry point to the APM BIOS, which is called | 2332 | * Set up the long jump entry point to the APM BIOS, which is called |
@@ -2428,7 +2418,6 @@ static void __exit apm_exit(void) | |||
2428 | kthread_stop(kapmd_task); | 2418 | kthread_stop(kapmd_task); |
2429 | kapmd_task = NULL; | 2419 | kapmd_task = NULL; |
2430 | } | 2420 | } |
2431 | pm_flags &= ~PM_APM; | ||
2432 | } | 2421 | } |
2433 | 2422 | ||
2434 | module_init(apm_init); | 2423 | module_init(apm_init); |
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index cfa82c899f47..4f13fafc5264 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c | |||
@@ -1,5 +1,70 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | #define COMPILE_OFFSETS | ||
7 | |||
8 | #include <linux/crypto.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/stddef.h> | ||
11 | #include <linux/hardirq.h> | ||
12 | #include <linux/suspend.h> | ||
13 | #include <linux/kbuild.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/thread_info.h> | ||
16 | #include <asm/sigframe.h> | ||
17 | #include <asm/bootparam.h> | ||
18 | #include <asm/suspend.h> | ||
19 | |||
20 | #ifdef CONFIG_XEN | ||
21 | #include <xen/interface/xen.h> | ||
22 | #endif | ||
23 | |||
1 | #ifdef CONFIG_X86_32 | 24 | #ifdef CONFIG_X86_32 |
2 | # include "asm-offsets_32.c" | 25 | # include "asm-offsets_32.c" |
3 | #else | 26 | #else |
4 | # include "asm-offsets_64.c" | 27 | # include "asm-offsets_64.c" |
5 | #endif | 28 | #endif |
29 | |||
30 | void common(void) { | ||
31 | BLANK(); | ||
32 | OFFSET(TI_flags, thread_info, flags); | ||
33 | OFFSET(TI_status, thread_info, status); | ||
34 | OFFSET(TI_addr_limit, thread_info, addr_limit); | ||
35 | OFFSET(TI_preempt_count, thread_info, preempt_count); | ||
36 | |||
37 | BLANK(); | ||
38 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); | ||
39 | |||
40 | BLANK(); | ||
41 | OFFSET(pbe_address, pbe, address); | ||
42 | OFFSET(pbe_orig_address, pbe, orig_address); | ||
43 | OFFSET(pbe_next, pbe, next); | ||
44 | |||
45 | #ifdef CONFIG_PARAVIRT | ||
46 | BLANK(); | ||
47 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
48 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
49 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
50 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
51 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
52 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
53 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
54 | OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); | ||
55 | OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_XEN | ||
59 | BLANK(); | ||
60 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
61 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
62 | #endif | ||
63 | |||
64 | BLANK(); | ||
65 | OFFSET(BP_scratch, boot_params, scratch); | ||
66 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
67 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
68 | OFFSET(BP_version, boot_params, hdr.version); | ||
69 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
70 | } | ||
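The new common() block populates shared offsets via OFFSET()/DEFINE()/BLANK() from <linux/kbuild.h>. Roughly, those macros emit marker lines into the compiler's assembler output, which Kbuild post-processes into a generated header. A minimal stand-alone sketch follows; the struct and symbol names are invented for the demo, and the macro bodies are paraphrased from kbuild.h rather than quoted:

/*
 * Minimal illustration of how OFFSET()/DEFINE() turn struct layouts into
 * assembler markers.  Compile with "gcc -S demo.c" only; the "->" markers
 * are not valid assembler input, they are meant to be scraped by a sed
 * pass and rewritten into #define lines.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct demo_thread_info {
	unsigned long flags;
	int           preempt_count;
	void         *addr_limit;
};

void common(void)
{
	/* demo.s then contains lines like
	 *   ->DEMO_TI_flags $0 offsetof(struct demo_thread_info, flags)
	 * which the post-processing step turns into
	 *   #define DEMO_TI_flags 0
	 */
	OFFSET(DEMO_TI_flags,         demo_thread_info, flags);
	OFFSET(DEMO_TI_preempt_count, demo_thread_info, preempt_count);
	DEFINE(DEMO_TI_SIZE, sizeof(struct demo_thread_info));
}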
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 1a4088dda37a..c29d631af6fc 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c | |||
@@ -1,26 +1,4 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed | ||
4 | * to extract and format the required data. | ||
5 | */ | ||
6 | |||
7 | #include <linux/crypto.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/signal.h> | ||
10 | #include <linux/personality.h> | ||
11 | #include <linux/suspend.h> | ||
12 | #include <linux/kbuild.h> | ||
13 | #include <asm/ucontext.h> | 1 | #include <asm/ucontext.h> |
14 | #include <asm/sigframe.h> | ||
15 | #include <asm/pgtable.h> | ||
16 | #include <asm/fixmap.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/thread_info.h> | ||
19 | #include <asm/bootparam.h> | ||
20 | #include <asm/elf.h> | ||
21 | #include <asm/suspend.h> | ||
22 | |||
23 | #include <xen/interface/xen.h> | ||
24 | 2 | ||
25 | #include <linux/lguest.h> | 3 | #include <linux/lguest.h> |
26 | #include "../../../drivers/lguest/lg.h" | 4 | #include "../../../drivers/lguest/lg.h" |
@@ -51,21 +29,10 @@ void foo(void) | |||
51 | OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); | 29 | OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); |
52 | BLANK(); | 30 | BLANK(); |
53 | 31 | ||
54 | OFFSET(TI_task, thread_info, task); | ||
55 | OFFSET(TI_exec_domain, thread_info, exec_domain); | ||
56 | OFFSET(TI_flags, thread_info, flags); | ||
57 | OFFSET(TI_status, thread_info, status); | ||
58 | OFFSET(TI_preempt_count, thread_info, preempt_count); | ||
59 | OFFSET(TI_addr_limit, thread_info, addr_limit); | ||
60 | OFFSET(TI_restart_block, thread_info, restart_block); | ||
61 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); | 32 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); |
62 | OFFSET(TI_cpu, thread_info, cpu); | 33 | OFFSET(TI_cpu, thread_info, cpu); |
63 | BLANK(); | 34 | BLANK(); |
64 | 35 | ||
65 | OFFSET(GDS_size, desc_ptr, size); | ||
66 | OFFSET(GDS_address, desc_ptr, address); | ||
67 | BLANK(); | ||
68 | |||
69 | OFFSET(PT_EBX, pt_regs, bx); | 36 | OFFSET(PT_EBX, pt_regs, bx); |
70 | OFFSET(PT_ECX, pt_regs, cx); | 37 | OFFSET(PT_ECX, pt_regs, cx); |
71 | OFFSET(PT_EDX, pt_regs, dx); | 38 | OFFSET(PT_EDX, pt_regs, dx); |
@@ -85,42 +52,13 @@ void foo(void) | |||
85 | OFFSET(PT_OLDSS, pt_regs, ss); | 52 | OFFSET(PT_OLDSS, pt_regs, ss); |
86 | BLANK(); | 53 | BLANK(); |
87 | 54 | ||
88 | OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); | ||
89 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext); | 55 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext); |
90 | BLANK(); | 56 | BLANK(); |
91 | 57 | ||
92 | OFFSET(pbe_address, pbe, address); | ||
93 | OFFSET(pbe_orig_address, pbe, orig_address); | ||
94 | OFFSET(pbe_next, pbe, next); | ||
95 | |||
96 | /* Offset from the sysenter stack to tss.sp0 */ | 58 | /* Offset from the sysenter stack to tss.sp0 */ |
97 | DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - | 59 | DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - |
98 | sizeof(struct tss_struct)); | 60 | sizeof(struct tss_struct)); |
99 | 61 | ||
100 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | ||
101 | DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); | ||
102 | DEFINE(THREAD_SIZE_asm, THREAD_SIZE); | ||
103 | |||
104 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); | ||
105 | |||
106 | #ifdef CONFIG_PARAVIRT | ||
107 | BLANK(); | ||
108 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
109 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
110 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
111 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
112 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
113 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
114 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
115 | OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); | ||
116 | #endif | ||
117 | |||
118 | #ifdef CONFIG_XEN | ||
119 | BLANK(); | ||
120 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
121 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
122 | #endif | ||
123 | |||
124 | #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) | 62 | #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) |
125 | BLANK(); | 63 | BLANK(); |
126 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); | 64 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); |
@@ -139,11 +77,4 @@ void foo(void) | |||
139 | OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); | 77 | OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); |
140 | OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); | 78 | OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); |
141 | #endif | 79 | #endif |
142 | |||
143 | BLANK(); | ||
144 | OFFSET(BP_scratch, boot_params, scratch); | ||
145 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
146 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
147 | OFFSET(BP_version, boot_params, hdr.version); | ||
148 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
149 | } | 80 | } |
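The TSS_sysenter_sp0 definition retained above encodes a negative displacement: the distance from the end of struct tss_struct (the top of the sysenter stack) back to x86_tss.sp0. A stand-alone illustration with a made-up struct, assuming that reading of the comment:

/*
 * Not the real tss_struct; the layout is invented to show the arithmetic.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_tss {
	unsigned long link;
	unsigned long sp0;        /* stack pointer loaded on ring transition */
	unsigned long pad[6];
};

int main(void)
{
	long d = (long)offsetof(struct demo_tss, sp0)
	       - (long)sizeof(struct demo_tss);

	/* e.g. 8 - 64 = -56: add this to the address just past the struct
	 * to land on sp0, which is what the sysenter entry path needs.      */
	printf("sp0 is %ld bytes from the end of the TSS\n", d);
	return 0;
}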
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 4a6aeedcd965..e72a1194af22 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -1,27 +1,4 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | #define COMPILE_OFFSETS | ||
7 | |||
8 | #include <linux/crypto.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/stddef.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/hardirq.h> | ||
13 | #include <linux/suspend.h> | ||
14 | #include <linux/kbuild.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/segment.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/ia32.h> | 1 | #include <asm/ia32.h> |
19 | #include <asm/bootparam.h> | ||
20 | #include <asm/suspend.h> | ||
21 | |||
22 | #include <xen/interface/xen.h> | ||
23 | |||
24 | #include <asm/sigframe.h> | ||
25 | 2 | ||
26 | #define __NO_STUBS 1 | 3 | #define __NO_STUBS 1 |
27 | #undef __SYSCALL | 4 | #undef __SYSCALL |
@@ -33,41 +10,19 @@ static char syscalls[] = { | |||
33 | 10 | ||
34 | int main(void) | 11 | int main(void) |
35 | { | 12 | { |
36 | #define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry)) | ||
37 | ENTRY(state); | ||
38 | ENTRY(flags); | ||
39 | ENTRY(pid); | ||
40 | BLANK(); | ||
41 | #undef ENTRY | ||
42 | #define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry)) | ||
43 | ENTRY(flags); | ||
44 | ENTRY(addr_limit); | ||
45 | ENTRY(preempt_count); | ||
46 | ENTRY(status); | ||
47 | #ifdef CONFIG_IA32_EMULATION | ||
48 | ENTRY(sysenter_return); | ||
49 | #endif | ||
50 | BLANK(); | ||
51 | #undef ENTRY | ||
52 | #ifdef CONFIG_PARAVIRT | 13 | #ifdef CONFIG_PARAVIRT |
53 | BLANK(); | ||
54 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
55 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
56 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
57 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
58 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
59 | OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); | 14 | OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); |
60 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
61 | OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); | 15 | OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); |
62 | OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); | 16 | OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); |
63 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
64 | OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); | 17 | OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); |
65 | OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); | 18 | BLANK(); |
66 | #endif | 19 | #endif |
67 | 20 | ||
68 | |||
69 | #ifdef CONFIG_IA32_EMULATION | 21 | #ifdef CONFIG_IA32_EMULATION |
70 | #define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry)) | 22 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); |
23 | BLANK(); | ||
24 | |||
25 | #define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry) | ||
71 | ENTRY(ax); | 26 | ENTRY(ax); |
72 | ENTRY(bx); | 27 | ENTRY(bx); |
73 | ENTRY(cx); | 28 | ENTRY(cx); |
@@ -79,15 +34,12 @@ int main(void) | |||
79 | ENTRY(ip); | 34 | ENTRY(ip); |
80 | BLANK(); | 35 | BLANK(); |
81 | #undef ENTRY | 36 | #undef ENTRY |
82 | DEFINE(IA32_RT_SIGFRAME_sigcontext, | 37 | |
83 | offsetof (struct rt_sigframe_ia32, uc.uc_mcontext)); | 38 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); |
84 | BLANK(); | 39 | BLANK(); |
85 | #endif | 40 | #endif |
86 | DEFINE(pbe_address, offsetof(struct pbe, address)); | 41 | |
87 | DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); | 42 | #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) |
88 | DEFINE(pbe_next, offsetof(struct pbe, next)); | ||
89 | BLANK(); | ||
90 | #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry)) | ||
91 | ENTRY(bx); | 43 | ENTRY(bx); |
92 | ENTRY(bx); | 44 | ENTRY(bx); |
93 | ENTRY(cx); | 45 | ENTRY(cx); |
@@ -107,7 +59,8 @@ int main(void) | |||
107 | ENTRY(flags); | 59 | ENTRY(flags); |
108 | BLANK(); | 60 | BLANK(); |
109 | #undef ENTRY | 61 | #undef ENTRY |
110 | #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry)) | 62 | |
63 | #define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry) | ||
111 | ENTRY(cr0); | 64 | ENTRY(cr0); |
112 | ENTRY(cr2); | 65 | ENTRY(cr2); |
113 | ENTRY(cr3); | 66 | ENTRY(cr3); |
@@ -115,26 +68,11 @@ int main(void) | |||
115 | ENTRY(cr8); | 68 | ENTRY(cr8); |
116 | BLANK(); | 69 | BLANK(); |
117 | #undef ENTRY | 70 | #undef ENTRY |
118 | DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); | ||
119 | BLANK(); | ||
120 | DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); | ||
121 | BLANK(); | ||
122 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); | ||
123 | 71 | ||
72 | OFFSET(TSS_ist, tss_struct, x86_tss.ist); | ||
124 | BLANK(); | 73 | BLANK(); |
125 | OFFSET(BP_scratch, boot_params, scratch); | ||
126 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
127 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
128 | OFFSET(BP_version, boot_params, hdr.version); | ||
129 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
130 | 74 | ||
131 | BLANK(); | 75 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); |
132 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | 76 | |
133 | #ifdef CONFIG_XEN | ||
134 | BLANK(); | ||
135 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
136 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
137 | #undef ENTRY | ||
138 | #endif | ||
139 | return 0; | 77 | return 0; |
140 | } | 78 | } |
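The surviving __NR_syscall_max definition depends on the syscalls[] array declared near the top of asm-offsets_64.c, which is sized by redefining __SYSCALL() as a designated initializer. A self-contained sketch of the trick with a made-up three-entry syscall list:

/*
 * Each __SYSCALL(nr, sym) entry becomes "[nr] = 1,", so the array length
 * is the highest syscall number plus one and never has to be maintained
 * by hand.  The entries below are only a sample, not the real table.
 */
#include <stdio.h>

#define __SYSCALL(nr, sym) [nr] = 1,

static char syscalls[] = {
	__SYSCALL(0, sys_read)
	__SYSCALL(1, sys_write)
	__SYSCALL(59, sys_execve)   /* highest number wins the array size */
};

int main(void)
{
	/* prints 59: the same value asm-offsets exports as __NR_syscall_max */
	printf("__NR_syscall_max = %zu\n", sizeof(syscalls) - 1);
	return 0;
}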
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb83c5a..3ecece0217ef 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
233 | } | 233 | } |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 236 | #ifdef CONFIG_NUMA |
237 | /* | ||
238 | * To workaround broken NUMA config. Read the comment in | ||
239 | * srat_detect_node(). | ||
240 | */ | ||
237 | static int __cpuinit nearby_node(int apicid) | 241 | static int __cpuinit nearby_node(int apicid) |
238 | { | 242 | { |
239 | int i, node; | 243 | int i, node; |
240 | 244 | ||
241 | for (i = apicid - 1; i >= 0; i--) { | 245 | for (i = apicid - 1; i >= 0; i--) { |
242 | node = apicid_to_node[i]; | 246 | node = __apicid_to_node[i]; |
243 | if (node != NUMA_NO_NODE && node_online(node)) | 247 | if (node != NUMA_NO_NODE && node_online(node)) |
244 | return node; | 248 | return node; |
245 | } | 249 | } |
246 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | 250 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { |
247 | node = apicid_to_node[i]; | 251 | node = __apicid_to_node[i]; |
248 | if (node != NUMA_NO_NODE && node_online(node)) | 252 | if (node != NUMA_NO_NODE && node_online(node)) |
249 | return node; | 253 | return node; |
250 | } | 254 | } |
@@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid) | |||
261 | #ifdef CONFIG_X86_HT | 265 | #ifdef CONFIG_X86_HT |
262 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | 266 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) |
263 | { | 267 | { |
264 | u32 nodes; | 268 | u32 nodes, cores_per_cu = 1; |
265 | u8 node_id; | 269 | u8 node_id; |
266 | int cpu = smp_processor_id(); | 270 | int cpu = smp_processor_id(); |
267 | 271 | ||
@@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |||
276 | /* get compute unit information */ | 280 | /* get compute unit information */ |
277 | smp_num_siblings = ((ebx >> 8) & 3) + 1; | 281 | smp_num_siblings = ((ebx >> 8) & 3) + 1; |
278 | c->compute_unit_id = ebx & 0xff; | 282 | c->compute_unit_id = ebx & 0xff; |
283 | cores_per_cu += ((ebx >> 8) & 3); | ||
279 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { | 284 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { |
280 | u64 value; | 285 | u64 value; |
281 | 286 | ||
@@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |||
288 | /* fixup multi-node processor information */ | 293 | /* fixup multi-node processor information */ |
289 | if (nodes > 1) { | 294 | if (nodes > 1) { |
290 | u32 cores_per_node; | 295 | u32 cores_per_node; |
296 | u32 cus_per_node; | ||
291 | 297 | ||
292 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | 298 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); |
293 | cores_per_node = c->x86_max_cores / nodes; | 299 | cores_per_node = c->x86_max_cores / nodes; |
300 | cus_per_node = cores_per_node / cores_per_cu; | ||
294 | 301 | ||
295 | /* store NodeID, use llc_shared_map to store sibling info */ | 302 | /* store NodeID, use llc_shared_map to store sibling info */ |
296 | per_cpu(cpu_llc_id, cpu) = node_id; | 303 | per_cpu(cpu_llc_id, cpu) = node_id; |
297 | 304 | ||
298 | /* core id to be in range from 0 to (cores_per_node - 1) */ | 305 | /* core id has to be in the [0 .. cores_per_node - 1] range */ |
299 | c->cpu_core_id = c->cpu_core_id % cores_per_node; | 306 | c->cpu_core_id %= cores_per_node; |
307 | c->compute_unit_id %= cus_per_node; | ||
300 | } | 308 | } |
301 | } | 309 | } |
302 | #endif | 310 | #endif |
@@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id); | |||
334 | 342 | ||
335 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 343 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
336 | { | 344 | { |
337 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 345 | #ifdef CONFIG_NUMA |
338 | int cpu = smp_processor_id(); | 346 | int cpu = smp_processor_id(); |
339 | int node; | 347 | int node; |
340 | unsigned apicid = c->apicid; | 348 | unsigned apicid = c->apicid; |
341 | 349 | ||
342 | node = per_cpu(cpu_llc_id, cpu); | 350 | node = numa_cpu_node(cpu); |
351 | if (node == NUMA_NO_NODE) | ||
352 | node = per_cpu(cpu_llc_id, cpu); | ||
343 | 353 | ||
344 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | ||
345 | node = apicid_to_node[apicid]; | ||
346 | if (!node_online(node)) { | 354 | if (!node_online(node)) { |
347 | /* Two possibilities here: | 355 | /* |
348 | - The CPU is missing memory and no node was created. | 356 | * Two possibilities here: |
349 | In that case try picking one from a nearby CPU | 357 | * |
350 | - The APIC IDs differ from the HyperTransport node IDs | 358 | * - The CPU is missing memory and no node was created. In |
351 | which the K8 northbridge parsing fills in. | 359 | * that case try picking one from a nearby CPU. |
352 | Assume they are all increased by a constant offset, | 360 | * |
353 | but in the same order as the HT nodeids. | 361 | * - The APIC IDs differ from the HyperTransport node IDs |
354 | If that doesn't result in a usable node fall back to the | 362 | * which the K8 northbridge parsing fills in. Assume |
355 | path for the previous case. */ | 363 | * they are all increased by a constant offset, but in |
356 | 364 | * the same order as the HT nodeids. If that doesn't | |
365 | * result in a usable node fall back to the path for the | ||
366 | * previous case. | ||
367 | * | ||
368 | * This workaround operates directly on the mapping between | ||
369 | * APIC ID and NUMA node, assuming certain relationship | ||
370 | * between APIC ID, HT node ID and NUMA topology. As going | ||
371 | * through CPU mapping may alter the outcome, directly | ||
372 | * access __apicid_to_node[]. | ||
373 | */ | ||
357 | int ht_nodeid = c->initial_apicid; | 374 | int ht_nodeid = c->initial_apicid; |
358 | 375 | ||
359 | if (ht_nodeid >= 0 && | 376 | if (ht_nodeid >= 0 && |
360 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | 377 | __apicid_to_node[ht_nodeid] != NUMA_NO_NODE) |
361 | node = apicid_to_node[ht_nodeid]; | 378 | node = __apicid_to_node[ht_nodeid]; |
362 | /* Pick a nearby node */ | 379 | /* Pick a nearby node */ |
363 | if (!node_online(node)) | 380 | if (!node_online(node)) |
364 | node = nearby_node(apicid); | 381 | node = nearby_node(apicid); |
@@ -594,6 +611,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
594 | } | 611 | } |
595 | } | 612 | } |
596 | #endif | 613 | #endif |
614 | |||
615 | /* As a rule processors have APIC timer running in deep C states */ | ||
616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | ||
617 | set_cpu_cap(c, X86_FEATURE_ARAT); | ||
597 | } | 618 | } |
598 | 619 | ||
599 | #ifdef CONFIG_X86_32 | 620 | #ifdef CONFIG_X86_32 |
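The multi-node fixup in amd_get_topology() now renumbers both the core id and the new compute_unit_id relative to the node. A stand-alone walk-through of that arithmetic with purely illustrative figures (two nodes, 16 cores, 2 cores per compute unit):

/*
 * Models the fixup added above for a hypothetical two-node processor.
 * The numbers are examples, not taken from a real part.
 */
#include <stdio.h>

int main(void)
{
	unsigned nodes = 2, x86_max_cores = 16, cores_per_cu = 2;
	unsigned cores_per_node = x86_max_cores / nodes;          /* 8 */
	unsigned cus_per_node   = cores_per_node / cores_per_cu;  /* 4 */
	unsigned cpu_core_id, compute_unit_id;

	/* A core that CPUID reported as core 11 / compute unit 5 ... */
	cpu_core_id     = 11;
	compute_unit_id = 5;

	/* ... is renumbered relative to its own node: */
	cpu_core_id     %= cores_per_node;   /* 11 % 8 = 3 */
	compute_unit_id %= cus_per_node;     /*  5 % 4 = 1 */

	printf("core id %u, compute unit %u within the node\n",
	       cpu_core_id, compute_unit_id);
	return 0;
}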
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834396bd..e2ced0074a45 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -675,7 +675,7 @@ void __init early_cpu_init(void) | |||
675 | const struct cpu_dev *const *cdev; | 675 | const struct cpu_dev *const *cdev; |
676 | int count = 0; | 676 | int count = 0; |
677 | 677 | ||
678 | #ifdef PROCESSOR_SELECT | 678 | #ifdef CONFIG_PROCESSOR_SELECT |
679 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 679 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
680 | #endif | 680 | #endif |
681 | 681 | ||
@@ -687,7 +687,7 @@ void __init early_cpu_init(void) | |||
687 | cpu_devs[count] = cpudev; | 687 | cpu_devs[count] = cpudev; |
688 | count++; | 688 | count++; |
689 | 689 | ||
690 | #ifdef PROCESSOR_SELECT | 690 | #ifdef CONFIG_PROCESSOR_SELECT |
691 | { | 691 | { |
692 | unsigned int j; | 692 | unsigned int j; |
693 | 693 | ||
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
869 | 869 | ||
870 | select_idle_routine(c); | 870 | select_idle_routine(c); |
871 | 871 | ||
872 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 872 | #ifdef CONFIG_NUMA |
873 | numa_add_cpu(smp_processor_id()); | 873 | numa_add_cpu(smp_processor_id()); |
874 | #endif | 874 | #endif |
875 | } | 875 | } |
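The early_cpu_init() change is a plain symbol fix: Kconfig options reach C code with a CONFIG_ prefix, so the bare PROCESSOR_SELECT test could never be true. A minimal illustration of the difference:

/*
 * Stand-alone demo: only the CONFIG_-prefixed test matches what Kconfig
 * actually generates (here simulated with a plain #define).
 */
#include <stdio.h>

#define CONFIG_PROCESSOR_SELECT 1   /* what enabling the option generates */

int main(void)
{
#ifdef PROCESSOR_SELECT             /* old test: never defined, dead code */
	puts("KERNEL supported cpus:");
#endif
#ifdef CONFIG_PROCESSOR_SELECT     /* new test: matches the Kconfig symbol */
	puts("KERNEL supported cpus:");
#endif
	return 0;
}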
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index 03162dac6271..cf48cdd6907d 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void) | |||
444 | return -EINVAL; | 444 | return -EINVAL; |
445 | } | 445 | } |
446 | /* Get max multiplier - as we always did. | 446 | /* Get max multiplier - as we always did. |
447 | * Longhaul MSR is usefull only when voltage scaling is enabled. | 447 | * Longhaul MSR is useful only when voltage scaling is enabled. |
448 | * C3 is booting at max anyway. */ | 448 | * C3 is booting at max anyway. */ |
449 | maxmult = mult; | 449 | maxmult = mult; |
450 | /* Get min multiplier */ | 450 | /* Get min multiplier */ |
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void) | |||
1011 | * trigger frequency transition in some cases. */ | 1011 | * trigger frequency transition in some cases. */ |
1012 | module_param(disable_acpi_c3, int, 0644); | 1012 | module_param(disable_acpi_c3, int, 0644); |
1013 | MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); | 1013 | MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); |
1014 | /* Change CPU voltage with frequency. Very usefull to save | 1014 | /* Change CPU voltage with frequency. Very useful to save |
1015 | * power, but most VIA C3 processors aren't supporting it. */ | 1015 | * power, but most VIA C3 processors aren't supporting it. */ |
1016 | module_param(scale_voltage, int, 0644); | 1016 | module_param(scale_voltage, int, 0644); |
1017 | MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); | 1017 | MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4a5a42b842ad..755a31e0f5b0 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -315,8 +315,6 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle) | |||
315 | 315 | ||
316 | input.count = 4; | 316 | input.count = 4; |
317 | input.pointer = in_params; | 317 | input.pointer = in_params; |
318 | input.count = 4; | ||
319 | input.pointer = in_params; | ||
320 | in_params[0].type = ACPI_TYPE_BUFFER; | 318 | in_params[0].type = ACPI_TYPE_BUFFER; |
321 | in_params[0].buffer.length = 16; | 319 | in_params[0].buffer.length = 16; |
322 | in_params[0].buffer.pointer = OSC_UUID; | 320 | in_params[0].buffer.pointer = OSC_UUID; |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c567dec854f6..2368e38327b3 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -630,8 +630,7 @@ static void print_basics(struct powernow_k8_data *data) | |||
630 | data->powernow_table[j].frequency/1000); | 630 | data->powernow_table[j].frequency/1000); |
631 | } else { | 631 | } else { |
632 | printk(KERN_INFO PFX | 632 | printk(KERN_INFO PFX |
633 | " %d : fid 0x%x (%d MHz), vid 0x%x\n", | 633 | "fid 0x%x (%d MHz), vid 0x%x\n", |
634 | j, | ||
635 | data->powernow_table[j].index & 0xff, | 634 | data->powernow_table[j].index & 0xff, |
636 | data->powernow_table[j].frequency/1000, | 635 | data->powernow_table[j].frequency/1000, |
637 | data->powernow_table[j].index >> 8); | 636 | data->powernow_table[j].index >> 8); |
@@ -1276,7 +1275,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1276 | 1275 | ||
1277 | if (powernow_k8_cpu_init_acpi(data)) { | 1276 | if (powernow_k8_cpu_init_acpi(data)) { |
1278 | /* | 1277 | /* |
1279 | * Use the PSB BIOS structure. This is only availabe on | 1278 | * Use the PSB BIOS structure. This is only available on |
1280 | * an UP version, and is deprecated by AMD. | 1279 | * an UP version, and is deprecated by AMD. |
1281 | */ | 1280 | */ |
1282 | if (num_online_cpus() != 1) { | 1281 | if (num_online_cpus() != 1) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index 8abd869baabf..91bc25b67bc1 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | |||
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
292 | 292 | ||
293 | result = speedstep_smi_ownership(); | 293 | result = speedstep_smi_ownership(); |
294 | if (result) { | 294 | if (result) { |
295 | dprintk("fails in aquiring ownership of a SMI interface.\n"); | 295 | dprintk("fails in acquiring ownership of a SMI interface.\n"); |
296 | return -EINVAL; | 296 | return -EINVAL; |
297 | } | 297 | } |
298 | 298 | ||
@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
360 | int result = speedstep_smi_ownership(); | 360 | int result = speedstep_smi_ownership(); |
361 | 361 | ||
362 | if (result) | 362 | if (result) |
363 | dprintk("fails in re-aquiring ownership of a SMI interface.\n"); | 363 | dprintk("fails in re-acquiring ownership of a SMI interface.\n"); |
364 | 364 | ||
365 | return result; | 365 | return result; |
366 | } | 366 | } |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d16c2c53d6bf..df86bc8c859d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
276 | 276 | ||
277 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 277 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
278 | { | 278 | { |
279 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 279 | #ifdef CONFIG_NUMA |
280 | unsigned node; | 280 | unsigned node; |
281 | int cpu = smp_processor_id(); | 281 | int cpu = smp_processor_id(); |
282 | int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; | ||
283 | 282 | ||
284 | /* Don't do the funky fallback heuristics the AMD version employs | 283 | /* Don't do the funky fallback heuristics the AMD version employs |
285 | for now. */ | 284 | for now. */ |
286 | node = apicid_to_node[apicid]; | 285 | node = numa_cpu_node(cpu); |
287 | if (node == NUMA_NO_NODE || !node_online(node)) { | 286 | if (node == NUMA_NO_NODE || !node_online(node)) { |
288 | /* reuse the value from init_cpu_to_node() */ | 287 | /* reuse the value from init_cpu_to_node() */ |
289 | node = cpu_to_node(cpu); | 288 | node = cpu_to_node(cpu); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index ec2c19a7b8ef..1ce1af2899df 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
304 | 304 | ||
305 | struct _cache_attr { | 305 | struct _cache_attr { |
306 | struct attribute attr; | 306 | struct attribute attr; |
307 | ssize_t (*show)(struct _cpuid4_info *, char *); | 307 | ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); |
308 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); | 308 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, |
309 | unsigned int); | ||
309 | }; | 310 | }; |
310 | 311 | ||
311 | #ifdef CONFIG_AMD_NB | 312 | #ifdef CONFIG_AMD_NB |
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, | |||
400 | 401 | ||
401 | #define SHOW_CACHE_DISABLE(slot) \ | 402 | #define SHOW_CACHE_DISABLE(slot) \ |
402 | static ssize_t \ | 403 | static ssize_t \ |
403 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \ | 404 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ |
405 | unsigned int cpu) \ | ||
404 | { \ | 406 | { \ |
405 | return show_cache_disable(this_leaf, buf, slot); \ | 407 | return show_cache_disable(this_leaf, buf, slot); \ |
406 | } | 408 | } |
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, | |||
512 | #define STORE_CACHE_DISABLE(slot) \ | 514 | #define STORE_CACHE_DISABLE(slot) \ |
513 | static ssize_t \ | 515 | static ssize_t \ |
514 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ | 516 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ |
515 | const char *buf, size_t count) \ | 517 | const char *buf, size_t count, \ |
518 | unsigned int cpu) \ | ||
516 | { \ | 519 | { \ |
517 | return store_cache_disable(this_leaf, buf, count, slot); \ | 520 | return store_cache_disable(this_leaf, buf, count, slot); \ |
518 | } | 521 | } |
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
524 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 527 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
525 | show_cache_disable_1, store_cache_disable_1); | 528 | show_cache_disable_1, store_cache_disable_1); |
526 | 529 | ||
530 | static ssize_t | ||
531 | show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) | ||
532 | { | ||
533 | if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
534 | return -EINVAL; | ||
535 | |||
536 | return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); | ||
537 | } | ||
538 | |||
539 | static ssize_t | ||
540 | store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, | ||
541 | unsigned int cpu) | ||
542 | { | ||
543 | unsigned long val; | ||
544 | |||
545 | if (!capable(CAP_SYS_ADMIN)) | ||
546 | return -EPERM; | ||
547 | |||
548 | if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
549 | return -EINVAL; | ||
550 | |||
551 | if (strict_strtoul(buf, 16, &val) < 0) | ||
552 | return -EINVAL; | ||
553 | |||
554 | if (amd_set_subcaches(cpu, val)) | ||
555 | return -EINVAL; | ||
556 | |||
557 | return count; | ||
558 | } | ||
559 | |||
560 | static struct _cache_attr subcaches = | ||
561 | __ATTR(subcaches, 0644, show_subcaches, store_subcaches); | ||
562 | |||
527 | #else /* CONFIG_AMD_NB */ | 563 | #else /* CONFIG_AMD_NB */ |
528 | #define amd_init_l3_cache(x, y) | 564 | #define amd_init_l3_cache(x, y) |
529 | #endif /* CONFIG_AMD_NB */ | 565 | #endif /* CONFIG_AMD_NB */ |
@@ -532,9 +568,9 @@ static int | |||
532 | __cpuinit cpuid4_cache_lookup_regs(int index, | 568 | __cpuinit cpuid4_cache_lookup_regs(int index, |
533 | struct _cpuid4_info_regs *this_leaf) | 569 | struct _cpuid4_info_regs *this_leaf) |
534 | { | 570 | { |
535 | union _cpuid4_leaf_eax eax; | 571 | union _cpuid4_leaf_eax eax; |
536 | union _cpuid4_leaf_ebx ebx; | 572 | union _cpuid4_leaf_ebx ebx; |
537 | union _cpuid4_leaf_ecx ecx; | 573 | union _cpuid4_leaf_ecx ecx; |
538 | unsigned edx; | 574 | unsigned edx; |
539 | 575 | ||
540 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { | 576 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
@@ -732,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
732 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 768 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
733 | 769 | ||
734 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 770 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { |
735 | for_each_cpu(i, c->llc_shared_map) { | 771 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { |
736 | if (!per_cpu(ici_cpuid4_info, i)) | 772 | if (!per_cpu(ici_cpuid4_info, i)) |
737 | continue; | 773 | continue; |
738 | this_leaf = CPUID4_INFO_IDX(i, index); | 774 | this_leaf = CPUID4_INFO_IDX(i, index); |
739 | for_each_cpu(sibling, c->llc_shared_map) { | 775 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { |
740 | if (!cpu_online(sibling)) | 776 | if (!cpu_online(sibling)) |
741 | continue; | 777 | continue; |
742 | set_bit(sibling, this_leaf->shared_cpu_map); | 778 | set_bit(sibling, this_leaf->shared_cpu_map); |
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); | |||
870 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) | 906 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) |
871 | 907 | ||
872 | #define show_one_plus(file_name, object, val) \ | 908 | #define show_one_plus(file_name, object, val) \ |
873 | static ssize_t show_##file_name \ | 909 | static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ |
874 | (struct _cpuid4_info *this_leaf, char *buf) \ | 910 | unsigned int cpu) \ |
875 | { \ | 911 | { \ |
876 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | 912 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ |
877 | } | 913 | } |
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1); | |||
882 | show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); | 918 | show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); |
883 | show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); | 919 | show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); |
884 | 920 | ||
885 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) | 921 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, |
922 | unsigned int cpu) | ||
886 | { | 923 | { |
887 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); | 924 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); |
888 | } | 925 | } |
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
906 | return n; | 943 | return n; |
907 | } | 944 | } |
908 | 945 | ||
909 | static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf) | 946 | static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, |
947 | unsigned int cpu) | ||
910 | { | 948 | { |
911 | return show_shared_cpu_map_func(leaf, 0, buf); | 949 | return show_shared_cpu_map_func(leaf, 0, buf); |
912 | } | 950 | } |
913 | 951 | ||
914 | static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf) | 952 | static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, |
953 | unsigned int cpu) | ||
915 | { | 954 | { |
916 | return show_shared_cpu_map_func(leaf, 1, buf); | 955 | return show_shared_cpu_map_func(leaf, 1, buf); |
917 | } | 956 | } |
918 | 957 | ||
919 | static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) | 958 | static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, |
959 | unsigned int cpu) | ||
920 | { | 960 | { |
921 | switch (this_leaf->eax.split.type) { | 961 | switch (this_leaf->eax.split.type) { |
922 | case CACHE_TYPE_DATA: | 962 | case CACHE_TYPE_DATA: |
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) | |||
974 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | 1014 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) |
975 | n += 2; | 1015 | n += 2; |
976 | 1016 | ||
1017 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1018 | n += 1; | ||
1019 | |||
977 | attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); | 1020 | attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); |
978 | if (attrs == NULL) | 1021 | if (attrs == NULL) |
979 | return attrs = default_attrs; | 1022 | return attrs = default_attrs; |
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) | |||
986 | attrs[n++] = &cache_disable_1.attr; | 1029 | attrs[n++] = &cache_disable_1.attr; |
987 | } | 1030 | } |
988 | 1031 | ||
1032 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1033 | attrs[n++] = &subcaches.attr; | ||
1034 | |||
989 | return attrs; | 1035 | return attrs; |
990 | } | 1036 | } |
991 | #endif | 1037 | #endif |
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | |||
998 | 1044 | ||
999 | ret = fattr->show ? | 1045 | ret = fattr->show ? |
1000 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 1046 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
1001 | buf) : | 1047 | buf, this_leaf->cpu) : |
1002 | 0; | 1048 | 0; |
1003 | return ret; | 1049 | return ret; |
1004 | } | 1050 | } |
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, | |||
1012 | 1058 | ||
1013 | ret = fattr->store ? | 1059 | ret = fattr->store ? |
1014 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 1060 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
1015 | buf, count) : | 1061 | buf, count, this_leaf->cpu) : |
1016 | 0; | 1062 | 0; |
1017 | return ret; | 1063 | return ret; |
1018 | } | 1064 | } |
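With the extra cpu argument threaded through show()/store(), the new subcaches attribute can call amd_get_subcaches()/amd_set_subcaches() for the right CPU. A small userspace sketch of reading the attribute; the /sys path assumes the L3 is cache index 3, as in the AMD-specific branch above, and is illustrative rather than taken from this patch:

/*
 * Reads the per-CPU L3 subcache mask exposed by the new attribute.
 * Requires a CPU with L3 partitioning support and this kernel change.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/subcaches";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);   /* no AMD_NB_L3_PARTITIONING, or older kernel */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 subcache mask: %s", buf);  /* hex mask, e.g. "f" */
	fclose(f);

	/* Writing a new hex mask (as root) would invoke store_subcaches(),
	 * which forwards it to amd_set_subcaches() for this CPU.            */
	return 0;
}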
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index a77971979564..0ed633c5048b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m) | |||
32 | { | 32 | { |
33 | struct mce *i = &per_cpu(injectm, m->extcpu); | 33 | struct mce *i = &per_cpu(injectm, m->extcpu); |
34 | 34 | ||
35 | /* Make sure noone reads partially written injectm */ | 35 | /* Make sure no one reads partially written injectm */ |
36 | i->finished = 0; | 36 | i->finished = 0; |
37 | mb(); | 37 | mb(); |
38 | m->finished = 0; | 38 | m->finished = 0; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index d916183b7f9c..ab1122998dba 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -881,7 +881,7 @@ reset: | |||
881 | * Check if the address reported by the CPU is in a format we can parse. | 881 | * Check if the address reported by the CPU is in a format we can parse. |
882 | * It would be possible to add code for most other cases, but all would | 882 | * It would be possible to add code for most other cases, but all would |
883 | * be somewhat complicated (e.g. segment offset would require an instruction | 883 | * be somewhat complicated (e.g. segment offset would require an instruction |
884 | * parser). So only support physical addresses upto page granuality for now. | 884 | * parser). So only support physical addresses up to page granuality for now. |
885 | */ | 885 | */ |
886 | static int mce_usable_address(struct mce *m) | 886 | static int mce_usable_address(struct mce *m) |
887 | { | 887 | { |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5bf2fac52aca..167f97b5596e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
527 | int i, err = 0; | 527 | int i, err = 0; |
528 | struct threshold_bank *b = NULL; | 528 | struct threshold_bank *b = NULL; |
529 | char name[32]; | 529 | char name[32]; |
530 | #ifdef CONFIG_SMP | ||
531 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
532 | #endif | ||
533 | 530 | ||
534 | sprintf(name, "threshold_bank%i", bank); | 531 | sprintf(name, "threshold_bank%i", bank); |
535 | 532 | ||
536 | #ifdef CONFIG_SMP | 533 | #ifdef CONFIG_SMP |
537 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 534 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
538 | i = cpumask_first(c->llc_shared_map); | 535 | i = cpumask_first(cpu_llc_shared_mask(cpu)); |
539 | 536 | ||
540 | /* first core not up yet */ | 537 | /* first core not up yet */ |
541 | if (cpu_data(i).cpu_core_id) | 538 | if (cpu_data(i).cpu_core_id) |
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
555 | if (err) | 552 | if (err) |
556 | goto out; | 553 | goto out; |
557 | 554 | ||
558 | cpumask_copy(b->cpus, c->llc_shared_map); | 555 | cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu)); |
559 | per_cpu(threshold_banks, cpu)[bank] = b; | 556 | per_cpu(threshold_banks, cpu)[bank] = b; |
560 | 557 | ||
561 | goto out; | 558 | goto out; |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 9f27228ceffd..a71efcdbb092 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong | 2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
3 | * because MTRRs can span upto 40 bits (36bits on most modern x86) | 3 | * because MTRRs can span up to 40 bits (36bits on most modern x86) |
4 | */ | 4 | */ |
5 | #define DEBUG | 5 | #define DEBUG |
6 | 6 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9d977a2ea693..87eab4a27dfc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/compat.h> | 32 | #include <asm/compat.h> |
33 | #include <asm/smp.h> | ||
33 | 34 | ||
34 | #if 0 | 35 | #if 0 |
35 | #undef wrmsrl | 36 | #undef wrmsrl |
@@ -93,6 +94,8 @@ struct amd_nb { | |||
93 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | 94 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; |
94 | }; | 95 | }; |
95 | 96 | ||
97 | struct intel_percore; | ||
98 | |||
96 | #define MAX_LBR_ENTRIES 16 | 99 | #define MAX_LBR_ENTRIES 16 |
97 | 100 | ||
98 | struct cpu_hw_events { | 101 | struct cpu_hw_events { |
@@ -128,6 +131,13 @@ struct cpu_hw_events { | |||
128 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | 131 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
129 | 132 | ||
130 | /* | 133 | /* |
134 | * Intel percore register state. | ||
135 | * Coordinate shared resources between HT threads. | ||
136 | */ | ||
137 | int percore_used; /* Used by this CPU? */ | ||
138 | struct intel_percore *per_core; | ||
139 | |||
140 | /* | ||
131 | * AMD specific bits | 141 | * AMD specific bits |
132 | */ | 142 | */ |
133 | struct amd_nb *amd_nb; | 143 | struct amd_nb *amd_nb; |
@@ -166,7 +176,7 @@ struct cpu_hw_events { | |||
166 | /* | 176 | /* |
167 | * Constraint on the Event code + UMask | 177 | * Constraint on the Event code + UMask |
168 | */ | 178 | */ |
169 | #define PEBS_EVENT_CONSTRAINT(c, n) \ | 179 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ |
170 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | 180 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) |
171 | 181 | ||
172 | #define EVENT_CONSTRAINT_END \ | 182 | #define EVENT_CONSTRAINT_END \ |
@@ -175,6 +185,28 @@ struct cpu_hw_events { | |||
175 | #define for_each_event_constraint(e, c) \ | 185 | #define for_each_event_constraint(e, c) \ |
176 | for ((e) = (c); (e)->weight; (e)++) | 186 | for ((e) = (c); (e)->weight; (e)++) |
177 | 187 | ||
188 | /* | ||
189 | * Extra registers for specific events. | ||
190 | * Some events need large masks and require external MSRs. | ||
191 | * Define a mapping to these extra registers. | ||
192 | */ | ||
193 | struct extra_reg { | ||
194 | unsigned int event; | ||
195 | unsigned int msr; | ||
196 | u64 config_mask; | ||
197 | u64 valid_mask; | ||
198 | }; | ||
199 | |||
200 | #define EVENT_EXTRA_REG(e, ms, m, vm) { \ | ||
201 | .event = (e), \ | ||
202 | .msr = (ms), \ | ||
203 | .config_mask = (m), \ | ||
204 | .valid_mask = (vm), \ | ||
205 | } | ||
206 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ | ||
207 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) | ||
208 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) | ||
209 | |||
178 | union perf_capabilities { | 210 | union perf_capabilities { |
179 | struct { | 211 | struct { |
180 | u64 lbr_format : 6; | 212 | u64 lbr_format : 6; |
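The extra_reg table introduced above describes events whose full configuration does not fit in the event-select MSR; the lookup that consumes it (x86_pmu_extra_regs()) appears further down in this patch. A stand-alone sketch of the matching rule, with a sample entry modelled on a Nehalem-style OFFCORE_RESPONSE event (event code 0xb7 steered through MSR 0x1a6 with a 16-bit valid mask); those particular numbers are illustrative here, not quoted from the hunk:

/*
 * Mirrors the event -> extra-MSR lookup: match on the masked event code,
 * reject config1 bits outside the MSR's valid range, otherwise record
 * which MSR has to be programmed alongside the counter.
 */
#include <stdio.h>

struct extra_reg {
	unsigned int event;
	unsigned int msr;
	unsigned long long config_mask;
	unsigned long long valid_mask;
};

#define EVENTSEL_EVENT_MASK 0xffULL  /* stand-in for ARCH_PERFMON_EVENTSEL_EVENT */

static const struct extra_reg extra_regs[] = {
	{ .event = 0xb7, .msr = 0x1a6,
	  .config_mask = EVENTSEL_EVENT_MASK, .valid_mask = 0xffff },
	{ 0 }                            /* terminator, like EVENT_EXTRA_END */
};

static int lookup_extra_reg(unsigned long long config,
			    unsigned long long config1,
			    unsigned int *msr, unsigned long long *extra)
{
	const struct extra_reg *er;

	for (er = extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (config1 & ~er->valid_mask)
			return -1;       /* bits outside the MSR's range */
		*msr = er->msr;
		*extra = config1;
		return 0;
	}
	return 0;                        /* no extra register needed */
}

int main(void)
{
	unsigned int msr = 0;
	unsigned long long extra = 0;

	if (lookup_extra_reg(0x01b7, 0x00ff, &msr, &extra) == 0 && msr)
		printf("event 0x1b7 -> extra MSR %#x = %#llx\n", msr, extra);
	return 0;
}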
@@ -219,6 +251,7 @@ struct x86_pmu { | |||
219 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | 251 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
220 | struct perf_event *event); | 252 | struct perf_event *event); |
221 | struct event_constraint *event_constraints; | 253 | struct event_constraint *event_constraints; |
254 | struct event_constraint *percore_constraints; | ||
222 | void (*quirks)(void); | 255 | void (*quirks)(void); |
223 | int perfctr_second_write; | 256 | int perfctr_second_write; |
224 | 257 | ||
@@ -247,6 +280,11 @@ struct x86_pmu { | |||
247 | */ | 280 | */ |
248 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | 281 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ |
249 | int lbr_nr; /* hardware stack size */ | 282 | int lbr_nr; /* hardware stack size */ |
283 | |||
284 | /* | ||
285 | * Extra registers for events | ||
286 | */ | ||
287 | struct extra_reg *extra_regs; | ||
250 | }; | 288 | }; |
251 | 289 | ||
252 | static struct x86_pmu x86_pmu __read_mostly; | 290 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -271,6 +309,10 @@ static u64 __read_mostly hw_cache_event_ids | |||
271 | [PERF_COUNT_HW_CACHE_MAX] | 309 | [PERF_COUNT_HW_CACHE_MAX] |
272 | [PERF_COUNT_HW_CACHE_OP_MAX] | 310 | [PERF_COUNT_HW_CACHE_OP_MAX] |
273 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 311 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
312 | static u64 __read_mostly hw_cache_extra_regs | ||
313 | [PERF_COUNT_HW_CACHE_MAX] | ||
314 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
315 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
274 | 316 | ||
275 | /* | 317 | /* |
276 | * Propagate event elapsed time into the generic event. | 318 | * Propagate event elapsed time into the generic event. |
@@ -298,7 +340,7 @@ x86_perf_event_update(struct perf_event *event) | |||
298 | */ | 340 | */ |
299 | again: | 341 | again: |
300 | prev_raw_count = local64_read(&hwc->prev_count); | 342 | prev_raw_count = local64_read(&hwc->prev_count); |
301 | rdmsrl(hwc->event_base + idx, new_raw_count); | 343 | rdmsrl(hwc->event_base, new_raw_count); |
302 | 344 | ||
303 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 345 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
304 | new_raw_count) != prev_raw_count) | 346 | new_raw_count) != prev_raw_count) |
@@ -321,6 +363,49 @@ again: | |||
321 | return new_raw_count; | 363 | return new_raw_count; |
322 | } | 364 | } |
323 | 365 | ||
366 | /* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ | ||
367 | static inline int x86_pmu_addr_offset(int index) | ||
368 | { | ||
369 | if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) | ||
370 | return index << 1; | ||
371 | return index; | ||
372 | } | ||
373 | |||
374 | static inline unsigned int x86_pmu_config_addr(int index) | ||
375 | { | ||
376 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | ||
377 | } | ||
378 | |||
379 | static inline unsigned int x86_pmu_event_addr(int index) | ||
380 | { | ||
381 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * Find and validate any extra registers to set up. | ||
386 | */ | ||
387 | static int x86_pmu_extra_regs(u64 config, struct perf_event *event) | ||
388 | { | ||
389 | struct extra_reg *er; | ||
390 | |||
391 | event->hw.extra_reg = 0; | ||
392 | event->hw.extra_config = 0; | ||
393 | |||
394 | if (!x86_pmu.extra_regs) | ||
395 | return 0; | ||
396 | |||
397 | for (er = x86_pmu.extra_regs; er->msr; er++) { | ||
398 | if (er->event != (config & er->config_mask)) | ||
399 | continue; | ||
400 | if (event->attr.config1 & ~er->valid_mask) | ||
401 | return -EINVAL; | ||
402 | event->hw.extra_reg = er->msr; | ||
403 | event->hw.extra_config = event->attr.config1; | ||
404 | break; | ||
405 | } | ||
406 | return 0; | ||
407 | } | ||
408 | |||
324 | static atomic_t active_events; | 409 | static atomic_t active_events; |
325 | static DEFINE_MUTEX(pmc_reserve_mutex); | 410 | static DEFINE_MUTEX(pmc_reserve_mutex); |
326 | 411 | ||
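The x86_pmu_addr_offset()/x86_pmu_config_addr()/x86_pmu_event_addr() helpers added in this hunk centralize counter MSR addressing: with the AMD core performance counter extension (family 15h) the event-select and counter MSRs are interleaved, so the per-index offset is doubled. A minimal userspace sketch of the same arithmetic follows; the numeric MSR base values are assumptions added for illustration and are not part of this diff.

#include <stdio.h>

/* Assumed MSR base addresses (illustrative, not taken from this patch): */
#define MSR_K7_EVNTSEL0    0xc0010000u  /* legacy AMD event selects */
#define MSR_K7_PERFCTR0    0xc0010004u  /* legacy AMD counters */
#define MSR_F15H_PERF_CTL  0xc0010200u  /* family 15h event selects */
#define MSR_F15H_PERF_CTR  0xc0010201u  /* family 15h counters */

/* Mirrors x86_pmu_addr_offset(): stride 2 when PERFCTR_CORE is present. */
static unsigned int addr_offset(int index, int has_perfctr_core)
{
	return has_perfctr_core ? (unsigned int)index << 1 : (unsigned int)index;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("idx %d: legacy ctl %#x, f15h ctl %#x, f15h ctr %#x\n",
		       i,
		       MSR_K7_EVNTSEL0 + addr_offset(i, 0),
		       MSR_F15H_PERF_CTL + addr_offset(i, 1),
		       MSR_F15H_PERF_CTR + addr_offset(i, 1));
	return 0;
}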
@@ -331,12 +416,12 @@ static bool reserve_pmc_hardware(void) | |||
331 | int i; | 416 | int i; |
332 | 417 | ||
333 | for (i = 0; i < x86_pmu.num_counters; i++) { | 418 | for (i = 0; i < x86_pmu.num_counters; i++) { |
334 | if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) | 419 | if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) |
335 | goto perfctr_fail; | 420 | goto perfctr_fail; |
336 | } | 421 | } |
337 | 422 | ||
338 | for (i = 0; i < x86_pmu.num_counters; i++) { | 423 | for (i = 0; i < x86_pmu.num_counters; i++) { |
339 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) | 424 | if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) |
340 | goto eventsel_fail; | 425 | goto eventsel_fail; |
341 | } | 426 | } |
342 | 427 | ||
@@ -344,13 +429,13 @@ static bool reserve_pmc_hardware(void) | |||
344 | 429 | ||
345 | eventsel_fail: | 430 | eventsel_fail: |
346 | for (i--; i >= 0; i--) | 431 | for (i--; i >= 0; i--) |
347 | release_evntsel_nmi(x86_pmu.eventsel + i); | 432 | release_evntsel_nmi(x86_pmu_config_addr(i)); |
348 | 433 | ||
349 | i = x86_pmu.num_counters; | 434 | i = x86_pmu.num_counters; |
350 | 435 | ||
351 | perfctr_fail: | 436 | perfctr_fail: |
352 | for (i--; i >= 0; i--) | 437 | for (i--; i >= 0; i--) |
353 | release_perfctr_nmi(x86_pmu.perfctr + i); | 438 | release_perfctr_nmi(x86_pmu_event_addr(i)); |
354 | 439 | ||
355 | return false; | 440 | return false; |
356 | } | 441 | } |
@@ -360,8 +445,8 @@ static void release_pmc_hardware(void) | |||
360 | int i; | 445 | int i; |
361 | 446 | ||
362 | for (i = 0; i < x86_pmu.num_counters; i++) { | 447 | for (i = 0; i < x86_pmu.num_counters; i++) { |
363 | release_perfctr_nmi(x86_pmu.perfctr + i); | 448 | release_perfctr_nmi(x86_pmu_event_addr(i)); |
364 | release_evntsel_nmi(x86_pmu.eventsel + i); | 449 | release_evntsel_nmi(x86_pmu_config_addr(i)); |
365 | } | 450 | } |
366 | } | 451 | } |
367 | 452 | ||
@@ -382,7 +467,7 @@ static bool check_hw_exists(void) | |||
382 | * complain and bail. | 467 | * complain and bail. |
383 | */ | 468 | */ |
384 | for (i = 0; i < x86_pmu.num_counters; i++) { | 469 | for (i = 0; i < x86_pmu.num_counters; i++) { |
385 | reg = x86_pmu.eventsel + i; | 470 | reg = x86_pmu_config_addr(i); |
386 | ret = rdmsrl_safe(reg, &val); | 471 | ret = rdmsrl_safe(reg, &val); |
387 | if (ret) | 472 | if (ret) |
388 | goto msr_fail; | 473 | goto msr_fail; |
@@ -407,8 +492,8 @@ static bool check_hw_exists(void) | |||
407 | * that don't trap on the MSR access and always return 0s. | 492 | * that don't trap on the MSR access and always return 0s. |
408 | */ | 493 | */ |
409 | val = 0xabcdUL; | 494 | val = 0xabcdUL; |
410 | ret = checking_wrmsrl(x86_pmu.perfctr, val); | 495 | ret = checking_wrmsrl(x86_pmu_event_addr(0), val); |
411 | ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new); | 496 | ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); |
412 | if (ret || val != val_new) | 497 | if (ret || val != val_new) |
413 | goto msr_fail; | 498 | goto msr_fail; |
414 | 499 | ||
@@ -442,8 +527,9 @@ static inline int x86_pmu_initialized(void) | |||
442 | } | 527 | } |
443 | 528 | ||
444 | static inline int | 529 | static inline int |
445 | set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) | 530 | set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) |
446 | { | 531 | { |
532 | struct perf_event_attr *attr = &event->attr; | ||
447 | unsigned int cache_type, cache_op, cache_result; | 533 | unsigned int cache_type, cache_op, cache_result; |
448 | u64 config, val; | 534 | u64 config, val; |
449 | 535 | ||
@@ -470,8 +556,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) | |||
470 | return -EINVAL; | 556 | return -EINVAL; |
471 | 557 | ||
472 | hwc->config |= val; | 558 | hwc->config |= val; |
473 | 559 | attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result]; | |
474 | return 0; | 560 | return x86_pmu_extra_regs(val, event); |
475 | } | 561 | } |
476 | 562 | ||
477 | static int x86_setup_perfctr(struct perf_event *event) | 563 | static int x86_setup_perfctr(struct perf_event *event) |
@@ -496,10 +582,10 @@ static int x86_setup_perfctr(struct perf_event *event) | |||
496 | } | 582 | } |
497 | 583 | ||
498 | if (attr->type == PERF_TYPE_RAW) | 584 | if (attr->type == PERF_TYPE_RAW) |
499 | return 0; | 585 | return x86_pmu_extra_regs(event->attr.config, event); |
500 | 586 | ||
501 | if (attr->type == PERF_TYPE_HW_CACHE) | 587 | if (attr->type == PERF_TYPE_HW_CACHE) |
502 | return set_ext_hw_attr(hwc, attr); | 588 | return set_ext_hw_attr(hwc, event); |
503 | 589 | ||
504 | if (attr->config >= x86_pmu.max_events) | 590 | if (attr->config >= x86_pmu.max_events) |
505 | return -EINVAL; | 591 | return -EINVAL; |
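With PERF_TYPE_RAW events now routed through x86_pmu_extra_regs(), user space can hand the OFFCORE_RESPONSE MSR value to the kernel in attr.config1 next to the raw event code in attr.config. The following userspace sketch is illustrative only: it assumes a Nehalem/Westmere-class CPU, headers new enough to expose the config1 field, and the example values 0x01b7 (OFFCORE_RESPONSE_0, umask 0x01, event 0xb7) and 0x0101 (DMND_DATA_RD | RESP_UNCORE_HIT).

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size    = sizeof(attr);
	attr.type    = PERF_TYPE_RAW;
	attr.config  = 0x01b7;	/* raw event: OFFCORE_RESPONSE_0 */
	attr.config1 = 0x0101;	/* assumed offcore mask: demand data rd, uncore hit */

	/* Count on the current task, any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... workload under measurement would run here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore response count: %lld\n", count);
	close(fd);
	return 0;
}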
@@ -617,11 +703,11 @@ static void x86_pmu_disable_all(void) | |||
617 | 703 | ||
618 | if (!test_bit(idx, cpuc->active_mask)) | 704 | if (!test_bit(idx, cpuc->active_mask)) |
619 | continue; | 705 | continue; |
620 | rdmsrl(x86_pmu.eventsel + idx, val); | 706 | rdmsrl(x86_pmu_config_addr(idx), val); |
621 | if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) | 707 | if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) |
622 | continue; | 708 | continue; |
623 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | 709 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
624 | wrmsrl(x86_pmu.eventsel + idx, val); | 710 | wrmsrl(x86_pmu_config_addr(idx), val); |
625 | } | 711 | } |
626 | } | 712 | } |
627 | 713 | ||
@@ -642,21 +728,26 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
642 | x86_pmu.disable_all(); | 728 | x86_pmu.disable_all(); |
643 | } | 729 | } |
644 | 730 | ||
731 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | ||
732 | u64 enable_mask) | ||
733 | { | ||
734 | if (hwc->extra_reg) | ||
735 | wrmsrl(hwc->extra_reg, hwc->extra_config); | ||
736 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | ||
737 | } | ||
738 | |||
645 | static void x86_pmu_enable_all(int added) | 739 | static void x86_pmu_enable_all(int added) |
646 | { | 740 | { |
647 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 741 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
648 | int idx; | 742 | int idx; |
649 | 743 | ||
650 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 744 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
651 | struct perf_event *event = cpuc->events[idx]; | 745 | struct hw_perf_event *hwc = &cpuc->events[idx]->hw; |
652 | u64 val; | ||
653 | 746 | ||
654 | if (!test_bit(idx, cpuc->active_mask)) | 747 | if (!test_bit(idx, cpuc->active_mask)) |
655 | continue; | 748 | continue; |
656 | 749 | ||
657 | val = event->hw.config; | 750 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); |
658 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | ||
659 | wrmsrl(x86_pmu.eventsel + idx, val); | ||
660 | } | 751 | } |
661 | } | 752 | } |
662 | 753 | ||
@@ -821,15 +912,10 @@ static inline void x86_assign_hw_event(struct perf_event *event, | |||
821 | hwc->event_base = 0; | 912 | hwc->event_base = 0; |
822 | } else if (hwc->idx >= X86_PMC_IDX_FIXED) { | 913 | } else if (hwc->idx >= X86_PMC_IDX_FIXED) { |
823 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | 914 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; |
824 | /* | 915 | hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0; |
825 | * We set it so that event_base + idx in wrmsr/rdmsr maps to | ||
826 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | ||
827 | */ | ||
828 | hwc->event_base = | ||
829 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | ||
830 | } else { | 916 | } else { |
831 | hwc->config_base = x86_pmu.eventsel; | 917 | hwc->config_base = x86_pmu_config_addr(hwc->idx); |
832 | hwc->event_base = x86_pmu.perfctr; | 918 | hwc->event_base = x86_pmu_event_addr(hwc->idx); |
833 | } | 919 | } |
834 | } | 920 | } |
835 | 921 | ||
@@ -915,17 +1001,11 @@ static void x86_pmu_enable(struct pmu *pmu) | |||
915 | x86_pmu.enable_all(added); | 1001 | x86_pmu.enable_all(added); |
916 | } | 1002 | } |
917 | 1003 | ||
918 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | ||
919 | u64 enable_mask) | ||
920 | { | ||
921 | wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask); | ||
922 | } | ||
923 | |||
924 | static inline void x86_pmu_disable_event(struct perf_event *event) | 1004 | static inline void x86_pmu_disable_event(struct perf_event *event) |
925 | { | 1005 | { |
926 | struct hw_perf_event *hwc = &event->hw; | 1006 | struct hw_perf_event *hwc = &event->hw; |
927 | 1007 | ||
928 | wrmsrl(hwc->config_base + hwc->idx, hwc->config); | 1008 | wrmsrl(hwc->config_base, hwc->config); |
929 | } | 1009 | } |
930 | 1010 | ||
931 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 1011 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
@@ -978,7 +1058,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
978 | */ | 1058 | */ |
979 | local64_set(&hwc->prev_count, (u64)-left); | 1059 | local64_set(&hwc->prev_count, (u64)-left); |
980 | 1060 | ||
981 | wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask); | 1061 | wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); |
982 | 1062 | ||
983 | /* | 1063 | /* |
984 | * Due to erratum on certain CPUs we need | 1064 |
@@ -986,7 +1066,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
986 | * is updated properly | 1066 | * is updated properly |
987 | */ | 1067 | */ |
988 | if (x86_pmu.perfctr_second_write) { | 1068 | if (x86_pmu.perfctr_second_write) { |
989 | wrmsrl(hwc->event_base + idx, | 1069 | wrmsrl(hwc->event_base, |
990 | (u64)(-left) & x86_pmu.cntval_mask); | 1070 | (u64)(-left) & x86_pmu.cntval_mask); |
991 | } | 1071 | } |
992 | 1072 | ||
@@ -1029,7 +1109,7 @@ static int x86_pmu_add(struct perf_event *event, int flags) | |||
1029 | 1109 | ||
1030 | /* | 1110 | /* |
1031 | * If group events scheduling transaction was started, | 1111 | * If group events scheduling transaction was started, |
1032 | * skip the schedulability test here, it will be peformed | 1112 | * skip the schedulability test here, it will be performed |
1033 | * at commit time (->commit_txn) as a whole | 1113 | * at commit time (->commit_txn) as a whole |
1034 | */ | 1114 | */ |
1035 | if (cpuc->group_flag & PERF_EVENT_TXN) | 1115 | if (cpuc->group_flag & PERF_EVENT_TXN) |
@@ -1113,8 +1193,8 @@ void perf_event_print_debug(void) | |||
1113 | pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); | 1193 | pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); |
1114 | 1194 | ||
1115 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1195 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
1116 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); | 1196 | rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); |
1117 | rdmsrl(x86_pmu.perfctr + idx, pmc_count); | 1197 | rdmsrl(x86_pmu_event_addr(idx), pmc_count); |
1118 | 1198 | ||
1119 | prev_left = per_cpu(pmc_prev_left[idx], cpu); | 1199 | prev_left = per_cpu(pmc_prev_left[idx], cpu); |
1120 | 1200 | ||
@@ -1389,7 +1469,7 @@ static void __init pmu_check_apic(void) | |||
1389 | pr_info("no hardware sampling interrupt available.\n"); | 1469 | pr_info("no hardware sampling interrupt available.\n"); |
1390 | } | 1470 | } |
1391 | 1471 | ||
1392 | int __init init_hw_perf_events(void) | 1472 | static int __init init_hw_perf_events(void) |
1393 | { | 1473 | { |
1394 | struct event_constraint *c; | 1474 | struct event_constraint *c; |
1395 | int err; | 1475 | int err; |
@@ -1608,7 +1688,7 @@ out: | |||
1608 | return ret; | 1688 | return ret; |
1609 | } | 1689 | } |
1610 | 1690 | ||
1611 | int x86_pmu_event_init(struct perf_event *event) | 1691 | static int x86_pmu_event_init(struct perf_event *event) |
1612 | { | 1692 | { |
1613 | struct pmu *tmp; | 1693 | struct pmu *tmp; |
1614 | int err; | 1694 | int err; |
@@ -1710,7 +1790,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
1710 | 1790 | ||
1711 | perf_callchain_store(entry, regs->ip); | 1791 | perf_callchain_store(entry, regs->ip); |
1712 | 1792 | ||
1713 | dump_trace(NULL, regs, NULL, &backtrace_ops, entry); | 1793 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); |
1714 | } | 1794 | } |
1715 | 1795 | ||
1716 | #ifdef CONFIG_COMPAT | 1796 | #ifdef CONFIG_COMPAT |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 67e2202a6039..461f62bbd774 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
127 | /* | 127 | /* |
128 | * AMD64 events are detected based on their event codes. | 128 | * AMD64 events are detected based on their event codes. |
129 | */ | 129 | */ |
130 | static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) | ||
131 | { | ||
132 | return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); | ||
133 | } | ||
134 | |||
130 | static inline int amd_is_nb_event(struct hw_perf_event *hwc) | 135 | static inline int amd_is_nb_event(struct hw_perf_event *hwc) |
131 | { | 136 | { |
132 | return (hwc->config & 0xe0) == 0xe0; | 137 | return (hwc->config & 0xe0) == 0xe0; |
@@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
385 | .cpu_dead = amd_pmu_cpu_dead, | 390 | .cpu_dead = amd_pmu_cpu_dead, |
386 | }; | 391 | }; |
387 | 392 | ||
393 | /* AMD Family 15h */ | ||
394 | |||
395 | #define AMD_EVENT_TYPE_MASK 0x000000F0ULL | ||
396 | |||
397 | #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL | ||
398 | #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL | ||
399 | #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL | ||
400 | #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL | ||
401 | #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL | ||
402 | #define AMD_EVENT_EX_LS 0x000000C0ULL | ||
403 | #define AMD_EVENT_DE 0x000000D0ULL | ||
404 | #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL | ||
405 | |||
406 | /* | ||
407 | * AMD family 15h event code/PMC mappings: | ||
408 | * | ||
409 | * type = event_code & 0x0F0: | ||
410 | * | ||
411 | * 0x000 FP PERF_CTL[5:3] | ||
412 | * 0x010 FP PERF_CTL[5:3] | ||
413 | * 0x020 LS PERF_CTL[5:0] | ||
414 | * 0x030 LS PERF_CTL[5:0] | ||
415 | * 0x040 DC PERF_CTL[5:0] | ||
416 | * 0x050 DC PERF_CTL[5:0] | ||
417 | * 0x060 CU PERF_CTL[2:0] | ||
418 | * 0x070 CU PERF_CTL[2:0] | ||
419 | * 0x080 IC/DE PERF_CTL[2:0] | ||
420 | * 0x090 IC/DE PERF_CTL[2:0] | ||
421 | * 0x0A0 --- | ||
422 | * 0x0B0 --- | ||
423 | * 0x0C0 EX/LS PERF_CTL[5:0] | ||
424 | * 0x0D0 DE PERF_CTL[2:0] | ||
425 | * 0x0E0 NB NB_PERF_CTL[3:0] | ||
426 | * 0x0F0 NB NB_PERF_CTL[3:0] | ||
427 | * | ||
428 | * Exceptions: | ||
429 | * | ||
430 | * 0x003 FP PERF_CTL[3] | ||
431 | * 0x00B FP PERF_CTL[3] | ||
432 | * 0x00D FP PERF_CTL[3] | ||
433 | * 0x023 DE PERF_CTL[2:0] | ||
434 | * 0x02D LS PERF_CTL[3] | ||
435 | * 0x02E LS PERF_CTL[3,0] | ||
436 | * 0x043 CU PERF_CTL[2:0] | ||
437 | * 0x045 CU PERF_CTL[2:0] | ||
438 | * 0x046 CU PERF_CTL[2:0] | ||
439 | * 0x054 CU PERF_CTL[2:0] | ||
440 | * 0x055 CU PERF_CTL[2:0] | ||
441 | * 0x08F IC PERF_CTL[0] | ||
442 | * 0x187 DE PERF_CTL[0] | ||
443 | * 0x188 DE PERF_CTL[0] | ||
444 | * 0x0DB EX PERF_CTL[5:0] | ||
445 | * 0x0DC LS PERF_CTL[5:0] | ||
446 | * 0x0DD LS PERF_CTL[5:0] | ||
447 | * 0x0DE LS PERF_CTL[5:0] | ||
448 | * 0x0DF LS PERF_CTL[5:0] | ||
449 | * 0x1D6 EX PERF_CTL[5:0] | ||
450 | * 0x1D8 EX PERF_CTL[5:0] | ||
451 | */ | ||
452 | |||
453 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); | ||
454 | static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); | ||
455 | static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); | ||
456 | static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); | ||
457 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); | ||
458 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | ||
459 | |||
460 | static struct event_constraint * | ||
461 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
462 | { | ||
463 | unsigned int event_code = amd_get_event_code(&event->hw); | ||
464 | |||
465 | switch (event_code & AMD_EVENT_TYPE_MASK) { | ||
466 | case AMD_EVENT_FP: | ||
467 | switch (event_code) { | ||
468 | case 0x003: | ||
469 | case 0x00B: | ||
470 | case 0x00D: | ||
471 | return &amd_f15_PMC3; | ||
472 | default: | ||
473 | return &amd_f15_PMC53; | ||
474 | } | ||
475 | case AMD_EVENT_LS: | ||
476 | case AMD_EVENT_DC: | ||
477 | case AMD_EVENT_EX_LS: | ||
478 | switch (event_code) { | ||
479 | case 0x023: | ||
480 | case 0x043: | ||
481 | case 0x045: | ||
482 | case 0x046: | ||
483 | case 0x054: | ||
484 | case 0x055: | ||
485 | return &amd_f15_PMC20; | ||
486 | case 0x02D: | ||
487 | return &amd_f15_PMC3; | ||
488 | case 0x02E: | ||
489 | return &amd_f15_PMC30; | ||
490 | default: | ||
491 | return &amd_f15_PMC50; | ||
492 | } | ||
493 | case AMD_EVENT_CU: | ||
494 | case AMD_EVENT_IC_DE: | ||
495 | case AMD_EVENT_DE: | ||
496 | switch (event_code) { | ||
497 | case 0x08F: | ||
498 | case 0x187: | ||
499 | case 0x188: | ||
500 | return &amd_f15_PMC0; | ||
501 | case 0x0DB ... 0x0DF: | ||
502 | case 0x1D6: | ||
503 | case 0x1D8: | ||
504 | return &amd_f15_PMC50; | ||
505 | default: | ||
506 | return &amd_f15_PMC20; | ||
507 | } | ||
508 | case AMD_EVENT_NB: | ||
509 | /* not yet implemented */ | ||
510 | return &emptyconstraint; | ||
511 | default: | ||
512 | return &emptyconstraint; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | static __initconst const struct x86_pmu amd_pmu_f15h = { | ||
517 | .name = "AMD Family 15h", | ||
518 | .handle_irq = x86_pmu_handle_irq, | ||
519 | .disable_all = x86_pmu_disable_all, | ||
520 | .enable_all = x86_pmu_enable_all, | ||
521 | .enable = x86_pmu_enable_event, | ||
522 | .disable = x86_pmu_disable_event, | ||
523 | .hw_config = amd_pmu_hw_config, | ||
524 | .schedule_events = x86_schedule_events, | ||
525 | .eventsel = MSR_F15H_PERF_CTL, | ||
526 | .perfctr = MSR_F15H_PERF_CTR, | ||
527 | .event_map = amd_pmu_event_map, | ||
528 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
529 | .num_counters = 6, | ||
530 | .cntval_bits = 48, | ||
531 | .cntval_mask = (1ULL << 48) - 1, | ||
532 | .apic = 1, | ||
533 | /* use highest bit to detect overflow */ | ||
534 | .max_period = (1ULL << 47) - 1, | ||
535 | .get_event_constraints = amd_get_event_constraints_f15h, | ||
536 | /* northbridge counters not yet implemented: */ | ||
537 | #if 0 | ||
538 | .put_event_constraints = amd_put_event_constraints, | ||
539 | |||
540 | .cpu_prepare = amd_pmu_cpu_prepare, | ||
541 | .cpu_starting = amd_pmu_cpu_starting, | ||
542 | .cpu_dead = amd_pmu_cpu_dead, | ||
543 | #endif | ||
544 | }; | ||
545 | |||
388 | static __init int amd_pmu_init(void) | 546 | static __init int amd_pmu_init(void) |
389 | { | 547 | { |
390 | /* Performance-monitoring supported from K7 and later: */ | 548 | /* Performance-monitoring supported from K7 and later: */ |
391 | if (boot_cpu_data.x86 < 6) | 549 | if (boot_cpu_data.x86 < 6) |
392 | return -ENODEV; | 550 | return -ENODEV; |
393 | 551 | ||
394 | x86_pmu = amd_pmu; | 552 | /* |
553 | * If core performance counter extensions exist, it must be | ||
554 | * family 15h, otherwise fail. See x86_pmu_addr_offset(). | ||
555 | */ | ||
556 | switch (boot_cpu_data.x86) { | ||
557 | case 0x15: | ||
558 | if (!cpu_has_perfctr_core) | ||
559 | return -ENODEV; | ||
560 | x86_pmu = amd_pmu_f15h; | ||
561 | break; | ||
562 | default: | ||
563 | if (cpu_has_perfctr_core) | ||
564 | return -ENODEV; | ||
565 | x86_pmu = amd_pmu; | ||
566 | break; | ||
567 | } | ||
395 | 568 | ||
396 | /* Events are common for all AMDs */ | 569 | /* Events are common for all AMDs */ |
397 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 570 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
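amd_pmu_init() now selects between the two PMU descriptions by family, and the family 15h constraint lookup above keys off amd_get_event_code(), which folds event-select bits [35:32] in above the low event byte. A small userspace sketch of that decode and the type classification; the raw config values are illustrative examples, not events recommended by this patch:

#include <stdio.h>

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

/* Mirrors amd_get_event_code(): bits [35:32] become code bits [11:8]. */
static unsigned int event_code(unsigned long long config)
{
	return ((config >> 24) & 0x0f00) | (config & 0x00ff);
}

int main(void)
{
	/* Illustrative configs: event[7:0] in bits 7:0, event[11:8] in bits 35:32. */
	unsigned long long configs[] = { 0xc0ULL, (1ULL << 32) | 0xd6ULL };

	for (int i = 0; i < 2; i++) {
		unsigned int code = event_code(configs[i]);

		printf("config %#llx -> event code %#x, type %#llx\n",
		       configs[i], code,
		       (unsigned long long)code & AMD_EVENT_TYPE_MASK);
	}
	return 0;
}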
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 008835c1d79c..8fc2b2cee1da 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1,5 +1,27 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #ifdef CONFIG_CPU_SUP_INTEL |
2 | 2 | ||
3 | #define MAX_EXTRA_REGS 2 | ||
4 | |||
5 | /* | ||
6 | * Per register state. | ||
7 | */ | ||
8 | struct er_account { | ||
9 | int ref; /* reference count */ | ||
10 | unsigned int extra_reg; /* extra MSR number */ | ||
11 | u64 extra_config; /* extra MSR config */ | ||
12 | }; | ||
13 | |||
14 | /* | ||
15 | * Per core state | ||
16 | * This is used to coordinate shared registers for HT threads. | ||
17 | */ | ||
18 | struct intel_percore { | ||
19 | raw_spinlock_t lock; /* protect structure */ | ||
20 | struct er_account regs[MAX_EXTRA_REGS]; | ||
21 | int refcnt; /* number of threads */ | ||
22 | unsigned core_id; | ||
23 | }; | ||
24 | |||
3 | /* | 25 | /* |
4 | * Intel PerfMon, used on Core and later. | 26 | * Intel PerfMon, used on Core and later. |
5 | */ | 27 | */ |
@@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] = | |||
64 | EVENT_CONSTRAINT_END | 86 | EVENT_CONSTRAINT_END |
65 | }; | 87 | }; |
66 | 88 | ||
89 | static struct extra_reg intel_nehalem_extra_regs[] = | ||
90 | { | ||
91 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | ||
92 | EVENT_EXTRA_END | ||
93 | }; | ||
94 | |||
95 | static struct event_constraint intel_nehalem_percore_constraints[] = | ||
96 | { | ||
97 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
98 | EVENT_CONSTRAINT_END | ||
99 | }; | ||
100 | |||
67 | static struct event_constraint intel_westmere_event_constraints[] = | 101 | static struct event_constraint intel_westmere_event_constraints[] = |
68 | { | 102 | { |
69 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
@@ -76,6 +110,33 @@ static struct event_constraint intel_westmere_event_constraints[] = | |||
76 | EVENT_CONSTRAINT_END | 110 | EVENT_CONSTRAINT_END |
77 | }; | 111 | }; |
78 | 112 | ||
113 | static struct event_constraint intel_snb_event_constraints[] = | ||
114 | { | ||
115 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
116 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
117 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
118 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | ||
119 | INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ | ||
120 | INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ | ||
121 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | ||
122 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | ||
123 | EVENT_CONSTRAINT_END | ||
124 | }; | ||
125 | |||
126 | static struct extra_reg intel_westmere_extra_regs[] = | ||
127 | { | ||
128 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | ||
129 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), | ||
130 | EVENT_EXTRA_END | ||
131 | }; | ||
132 | |||
133 | static struct event_constraint intel_westmere_percore_constraints[] = | ||
134 | { | ||
135 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
136 | INTEL_EVENT_CONSTRAINT(0xbb, 0), | ||
137 | EVENT_CONSTRAINT_END | ||
138 | }; | ||
139 | |||
79 | static struct event_constraint intel_gen_event_constraints[] = | 140 | static struct event_constraint intel_gen_event_constraints[] = |
80 | { | 141 | { |
81 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 142 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
@@ -89,6 +150,106 @@ static u64 intel_pmu_event_map(int hw_event) | |||
89 | return intel_perfmon_event_map[hw_event]; | 150 | return intel_perfmon_event_map[hw_event]; |
90 | } | 151 | } |
91 | 152 | ||
153 | static __initconst const u64 snb_hw_cache_event_ids | ||
154 | [PERF_COUNT_HW_CACHE_MAX] | ||
155 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
156 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
157 | { | ||
158 | [ C(L1D) ] = { | ||
159 | [ C(OP_READ) ] = { | ||
160 | [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */ | ||
161 | [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */ | ||
162 | }, | ||
163 | [ C(OP_WRITE) ] = { | ||
164 | [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */ | ||
165 | [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */ | ||
166 | }, | ||
167 | [ C(OP_PREFETCH) ] = { | ||
168 | [ C(RESULT_ACCESS) ] = 0x0, | ||
169 | [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */ | ||
170 | }, | ||
171 | }, | ||
172 | [ C(L1I ) ] = { | ||
173 | [ C(OP_READ) ] = { | ||
174 | [ C(RESULT_ACCESS) ] = 0x0, | ||
175 | [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ | ||
176 | }, | ||
177 | [ C(OP_WRITE) ] = { | ||
178 | [ C(RESULT_ACCESS) ] = -1, | ||
179 | [ C(RESULT_MISS) ] = -1, | ||
180 | }, | ||
181 | [ C(OP_PREFETCH) ] = { | ||
182 | [ C(RESULT_ACCESS) ] = 0x0, | ||
183 | [ C(RESULT_MISS) ] = 0x0, | ||
184 | }, | ||
185 | }, | ||
186 | [ C(LL ) ] = { | ||
187 | /* | ||
188 | * TBD: Need Off-core Response Performance Monitoring support | ||
189 | */ | ||
190 | [ C(OP_READ) ] = { | ||
191 | /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ | ||
192 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
193 | /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ | ||
194 | [ C(RESULT_MISS) ] = 0x01bb, | ||
195 | }, | ||
196 | [ C(OP_WRITE) ] = { | ||
197 | /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ | ||
198 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
199 | /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ | ||
200 | [ C(RESULT_MISS) ] = 0x01bb, | ||
201 | }, | ||
202 | [ C(OP_PREFETCH) ] = { | ||
203 | /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ | ||
204 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
205 | /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ | ||
206 | [ C(RESULT_MISS) ] = 0x01bb, | ||
207 | }, | ||
208 | }, | ||
209 | [ C(DTLB) ] = { | ||
210 | [ C(OP_READ) ] = { | ||
211 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ | ||
212 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */ | ||
213 | }, | ||
214 | [ C(OP_WRITE) ] = { | ||
215 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ | ||
216 | [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | ||
217 | }, | ||
218 | [ C(OP_PREFETCH) ] = { | ||
219 | [ C(RESULT_ACCESS) ] = 0x0, | ||
220 | [ C(RESULT_MISS) ] = 0x0, | ||
221 | }, | ||
222 | }, | ||
223 | [ C(ITLB) ] = { | ||
224 | [ C(OP_READ) ] = { | ||
225 | [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */ | ||
226 | [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */ | ||
227 | }, | ||
228 | [ C(OP_WRITE) ] = { | ||
229 | [ C(RESULT_ACCESS) ] = -1, | ||
230 | [ C(RESULT_MISS) ] = -1, | ||
231 | }, | ||
232 | [ C(OP_PREFETCH) ] = { | ||
233 | [ C(RESULT_ACCESS) ] = -1, | ||
234 | [ C(RESULT_MISS) ] = -1, | ||
235 | }, | ||
236 | }, | ||
237 | [ C(BPU ) ] = { | ||
238 | [ C(OP_READ) ] = { | ||
239 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
240 | [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
241 | }, | ||
242 | [ C(OP_WRITE) ] = { | ||
243 | [ C(RESULT_ACCESS) ] = -1, | ||
244 | [ C(RESULT_MISS) ] = -1, | ||
245 | }, | ||
246 | [ C(OP_PREFETCH) ] = { | ||
247 | [ C(RESULT_ACCESS) ] = -1, | ||
248 | [ C(RESULT_MISS) ] = -1, | ||
249 | }, | ||
250 | }, | ||
251 | }; | ||
252 | |||
92 | static __initconst const u64 westmere_hw_cache_event_ids | 253 | static __initconst const u64 westmere_hw_cache_event_ids |
93 | [PERF_COUNT_HW_CACHE_MAX] | 254 | [PERF_COUNT_HW_CACHE_MAX] |
94 | [PERF_COUNT_HW_CACHE_OP_MAX] | 255 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -124,16 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
124 | }, | 285 | }, |
125 | [ C(LL ) ] = { | 286 | [ C(LL ) ] = { |
126 | [ C(OP_READ) ] = { | 287 | [ C(OP_READ) ] = { |
127 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | 288 | /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ |
128 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | 289 | [ C(RESULT_ACCESS) ] = 0x01b7, |
290 | /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ | ||
291 | [ C(RESULT_MISS) ] = 0x01bb, | ||
129 | }, | 292 | }, |
293 | /* | ||
294 | * Use RFO, not WRITEBACK, because a write miss would typically occur | ||
295 | * on RFO. | ||
296 | */ | ||
130 | [ C(OP_WRITE) ] = { | 297 | [ C(OP_WRITE) ] = { |
131 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | 298 | /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ |
132 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | 299 | [ C(RESULT_ACCESS) ] = 0x01bb, |
300 | /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ | ||
301 | [ C(RESULT_MISS) ] = 0x01b7, | ||
133 | }, | 302 | }, |
134 | [ C(OP_PREFETCH) ] = { | 303 | [ C(OP_PREFETCH) ] = { |
135 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | 304 | /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ |
136 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | 305 | [ C(RESULT_ACCESS) ] = 0x01b7, |
306 | /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ | ||
307 | [ C(RESULT_MISS) ] = 0x01bb, | ||
137 | }, | 308 | }, |
138 | }, | 309 | }, |
139 | [ C(DTLB) ] = { | 310 | [ C(DTLB) ] = { |
@@ -180,6 +351,39 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
180 | }, | 351 | }, |
181 | }; | 352 | }; |
182 | 353 | ||
354 | /* | ||
355 | * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 | ||
356 | */ | ||
357 | |||
358 | #define DMND_DATA_RD (1 << 0) | ||
359 | #define DMND_RFO (1 << 1) | ||
360 | #define DMND_WB (1 << 3) | ||
361 | #define PF_DATA_RD (1 << 4) | ||
362 | #define PF_DATA_RFO (1 << 5) | ||
363 | #define RESP_UNCORE_HIT (1 << 8) | ||
364 | #define RESP_MISS (0xf600) /* non uncore hit */ | ||
365 | |||
366 | static __initconst const u64 nehalem_hw_cache_extra_regs | ||
367 | [PERF_COUNT_HW_CACHE_MAX] | ||
368 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
369 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
370 | { | ||
371 | [ C(LL ) ] = { | ||
372 | [ C(OP_READ) ] = { | ||
373 | [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, | ||
374 | [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, | ||
375 | }, | ||
376 | [ C(OP_WRITE) ] = { | ||
377 | [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, | ||
378 | [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, | ||
379 | }, | ||
380 | [ C(OP_PREFETCH) ] = { | ||
381 | [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, | ||
382 | [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, | ||
383 | }, | ||
384 | } | ||
385 | }; | ||
386 | |||
183 | static __initconst const u64 nehalem_hw_cache_event_ids | 387 | static __initconst const u64 nehalem_hw_cache_event_ids |
184 | [PERF_COUNT_HW_CACHE_MAX] | 388 | [PERF_COUNT_HW_CACHE_MAX] |
185 | [PERF_COUNT_HW_CACHE_OP_MAX] | 389 | [PERF_COUNT_HW_CACHE_OP_MAX] |
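The nehalem_hw_cache_extra_regs table composes the OFFCORE_RESPONSE MSR values from the request-type and response bits defined directly above it; set_ext_hw_attr() later copies the chosen entry into attr.config1. A quick sketch that prints the composed values for the LL rows, with the bit positions copied from the defines above (if those defines change, these numbers change with them):

#include <stdio.h>

#define DMND_DATA_RD	(1 << 0)
#define DMND_RFO	(1 << 1)
#define DMND_WB		(1 << 3)
#define PF_DATA_RD	(1 << 4)
#define PF_DATA_RFO	(1 << 5)
#define RESP_UNCORE_HIT	(1 << 8)
#define RESP_MISS	(0xf600)	/* non uncore hit */

int main(void)
{
	printf("LL read     access: %#x  miss: %#x\n",
	       DMND_DATA_RD | RESP_UNCORE_HIT, DMND_DATA_RD | RESP_MISS);
	printf("LL write    access: %#x  miss: %#x\n",
	       DMND_RFO | DMND_WB | RESP_UNCORE_HIT, DMND_RFO | DMND_WB | RESP_MISS);
	printf("LL prefetch access: %#x  miss: %#x\n",
	       PF_DATA_RD | PF_DATA_RFO | RESP_UNCORE_HIT,
	       PF_DATA_RD | PF_DATA_RFO | RESP_MISS);
	return 0;
}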
@@ -215,16 +419,26 @@ static __initconst const u64 nehalem_hw_cache_event_ids | |||
215 | }, | 419 | }, |
216 | [ C(LL ) ] = { | 420 | [ C(LL ) ] = { |
217 | [ C(OP_READ) ] = { | 421 | [ C(OP_READ) ] = { |
218 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | 422 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ |
219 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | 423 | [ C(RESULT_ACCESS) ] = 0x01b7, |
424 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | ||
425 | [ C(RESULT_MISS) ] = 0x01b7, | ||
220 | }, | 426 | }, |
427 | /* | ||
428 | * Use RFO, not WRITEBACK, because a write miss would typically occur | ||
429 | * on RFO. | ||
430 | */ | ||
221 | [ C(OP_WRITE) ] = { | 431 | [ C(OP_WRITE) ] = { |
222 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | 432 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ |
223 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | 433 | [ C(RESULT_ACCESS) ] = 0x01b7, |
434 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | ||
435 | [ C(RESULT_MISS) ] = 0x01b7, | ||
224 | }, | 436 | }, |
225 | [ C(OP_PREFETCH) ] = { | 437 | [ C(OP_PREFETCH) ] = { |
226 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | 438 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ |
227 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | 439 | [ C(RESULT_ACCESS) ] = 0x01b7, |
440 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | ||
441 | [ C(RESULT_MISS) ] = 0x01b7, | ||
228 | }, | 442 | }, |
229 | }, | 443 | }, |
230 | [ C(DTLB) ] = { | 444 | [ C(DTLB) ] = { |
@@ -691,8 +905,8 @@ static void intel_pmu_reset(void) | |||
691 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | 905 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); |
692 | 906 | ||
693 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 907 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
694 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | 908 | checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); |
695 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | 909 | checking_wrmsrl(x86_pmu_event_addr(idx), 0ull); |
696 | } | 910 | } |
697 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) | 911 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) |
698 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 912 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
@@ -794,6 +1008,67 @@ intel_bts_constraints(struct perf_event *event) | |||
794 | } | 1008 | } |
795 | 1009 | ||
796 | static struct event_constraint * | 1010 | static struct event_constraint * |
1011 | intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
1012 | { | ||
1013 | struct hw_perf_event *hwc = &event->hw; | ||
1014 | unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; | ||
1015 | struct event_constraint *c; | ||
1016 | struct intel_percore *pc; | ||
1017 | struct er_account *era; | ||
1018 | int i; | ||
1019 | int free_slot; | ||
1020 | int found; | ||
1021 | |||
1022 | if (!x86_pmu.percore_constraints || hwc->extra_alloc) | ||
1023 | return NULL; | ||
1024 | |||
1025 | for (c = x86_pmu.percore_constraints; c->cmask; c++) { | ||
1026 | if (e != c->code) | ||
1027 | continue; | ||
1028 | |||
1029 | /* | ||
1030 | * Allocate resource per core. | ||
1031 | */ | ||
1032 | pc = cpuc->per_core; | ||
1033 | if (!pc) | ||
1034 | break; | ||
1035 | c = &emptyconstraint; | ||
1036 | raw_spin_lock(&pc->lock); | ||
1037 | free_slot = -1; | ||
1038 | found = 0; | ||
1039 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | ||
1040 | era = &pc->regs[i]; | ||
1041 | if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { | ||
1042 | /* Allow sharing same config */ | ||
1043 | if (hwc->extra_config == era->extra_config) { | ||
1044 | era->ref++; | ||
1045 | cpuc->percore_used = 1; | ||
1046 | hwc->extra_alloc = 1; | ||
1047 | c = NULL; | ||
1048 | } | ||
1049 | /* else conflict */ | ||
1050 | found = 1; | ||
1051 | break; | ||
1052 | } else if (era->ref == 0 && free_slot == -1) | ||
1053 | free_slot = i; | ||
1054 | } | ||
1055 | if (!found && free_slot != -1) { | ||
1056 | era = &pc->regs[free_slot]; | ||
1057 | era->ref = 1; | ||
1058 | era->extra_reg = hwc->extra_reg; | ||
1059 | era->extra_config = hwc->extra_config; | ||
1060 | cpuc->percore_used = 1; | ||
1061 | hwc->extra_alloc = 1; | ||
1062 | c = NULL; | ||
1063 | } | ||
1064 | raw_spin_unlock(&pc->lock); | ||
1065 | return c; | ||
1066 | } | ||
1067 | |||
1068 | return NULL; | ||
1069 | } | ||
1070 | |||
1071 | static struct event_constraint * | ||
797 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 1072 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) |
798 | { | 1073 | { |
799 | struct event_constraint *c; | 1074 | struct event_constraint *c; |
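intel_percore_constraints() arbitrates the OFFCORE_RSP MSRs between the two hardware threads of a core: a thread asking for the same MSR with the same config1 value shares the existing allocation (reference counted), a different value while the register is in use yields the empty constraint, and unused slots are claimed on first use. The following self-contained sketch models only that allocation decision; locking, the percore_used/extra_alloc bookkeeping and the real kernel structures are deliberately omitted.

#include <stdio.h>

#define MAX_EXTRA_REGS 2

struct er_account_sketch {
	int ref;
	unsigned int msr;
	unsigned long long config;
};

static struct er_account_sketch regs[MAX_EXTRA_REGS];

/* Returns 0 if the event may run, -1 if the register is already taken
 * with a conflicting value (the kernel maps this to emptyconstraint). */
static int claim(unsigned int msr, unsigned long long config)
{
	int free_slot = -1;

	for (int i = 0; i < MAX_EXTRA_REGS; i++) {
		if (regs[i].ref > 0 && regs[i].msr == msr) {
			if (regs[i].config == config) {
				regs[i].ref++;	/* identical setup: share it */
				return 0;
			}
			return -1;		/* same MSR, different value */
		}
		if (regs[i].ref == 0 && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;
	regs[free_slot] = (struct er_account_sketch){ 1, msr, config };
	return 0;
}

int main(void)
{
	/* 0x1a6 is an assumed stand-in for MSR_OFFCORE_RSP_0. */
	printf("thread A:            %d\n", claim(0x1a6, 0x0101));	/* claims slot */
	printf("thread B, same mask: %d\n", claim(0x1a6, 0x0101));	/* shares */
	printf("thread B, new mask:  %d\n", claim(0x1a6, 0xf601));	/* conflict */
	return 0;
}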
@@ -806,9 +1081,51 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
806 | if (c) | 1081 | if (c) |
807 | return c; | 1082 | return c; |
808 | 1083 | ||
1084 | c = intel_percore_constraints(cpuc, event); | ||
1085 | if (c) | ||
1086 | return c; | ||
1087 | |||
809 | return x86_get_event_constraints(cpuc, event); | 1088 | return x86_get_event_constraints(cpuc, event); |
810 | } | 1089 | } |
811 | 1090 | ||
1091 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | ||
1092 | struct perf_event *event) | ||
1093 | { | ||
1094 | struct extra_reg *er; | ||
1095 | struct intel_percore *pc; | ||
1096 | struct er_account *era; | ||
1097 | struct hw_perf_event *hwc = &event->hw; | ||
1098 | int i, allref; | ||
1099 | |||
1100 | if (!cpuc->percore_used) | ||
1101 | return; | ||
1102 | |||
1103 | for (er = x86_pmu.extra_regs; er->msr; er++) { | ||
1104 | if (er->event != (hwc->config & er->config_mask)) | ||
1105 | continue; | ||
1106 | |||
1107 | pc = cpuc->per_core; | ||
1108 | raw_spin_lock(&pc->lock); | ||
1109 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | ||
1110 | era = &pc->regs[i]; | ||
1111 | if (era->ref > 0 && | ||
1112 | era->extra_config == hwc->extra_config && | ||
1113 | era->extra_reg == er->msr) { | ||
1114 | era->ref--; | ||
1115 | hwc->extra_alloc = 0; | ||
1116 | break; | ||
1117 | } | ||
1118 | } | ||
1119 | allref = 0; | ||
1120 | for (i = 0; i < MAX_EXTRA_REGS; i++) | ||
1121 | allref += pc->regs[i].ref; | ||
1122 | if (allref == 0) | ||
1123 | cpuc->percore_used = 0; | ||
1124 | raw_spin_unlock(&pc->lock); | ||
1125 | break; | ||
1126 | } | ||
1127 | } | ||
1128 | |||
812 | static int intel_pmu_hw_config(struct perf_event *event) | 1129 | static int intel_pmu_hw_config(struct perf_event *event) |
813 | { | 1130 | { |
814 | int ret = x86_pmu_hw_config(event); | 1131 | int ret = x86_pmu_hw_config(event); |
@@ -880,20 +1197,67 @@ static __initconst const struct x86_pmu core_pmu = { | |||
880 | */ | 1197 | */ |
881 | .max_period = (1ULL << 31) - 1, | 1198 | .max_period = (1ULL << 31) - 1, |
882 | .get_event_constraints = intel_get_event_constraints, | 1199 | .get_event_constraints = intel_get_event_constraints, |
1200 | .put_event_constraints = intel_put_event_constraints, | ||
883 | .event_constraints = intel_core_event_constraints, | 1201 | .event_constraints = intel_core_event_constraints, |
884 | }; | 1202 | }; |
885 | 1203 | ||
1204 | static int intel_pmu_cpu_prepare(int cpu) | ||
1205 | { | ||
1206 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1207 | |||
1208 | if (!cpu_has_ht_siblings()) | ||
1209 | return NOTIFY_OK; | ||
1210 | |||
1211 | cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), | ||
1212 | GFP_KERNEL, cpu_to_node(cpu)); | ||
1213 | if (!cpuc->per_core) | ||
1214 | return NOTIFY_BAD; | ||
1215 | |||
1216 | raw_spin_lock_init(&cpuc->per_core->lock); | ||
1217 | cpuc->per_core->core_id = -1; | ||
1218 | return NOTIFY_OK; | ||
1219 | } | ||
1220 | |||
886 | static void intel_pmu_cpu_starting(int cpu) | 1221 | static void intel_pmu_cpu_starting(int cpu) |
887 | { | 1222 | { |
1223 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1224 | int core_id = topology_core_id(cpu); | ||
1225 | int i; | ||
1226 | |||
888 | init_debug_store_on_cpu(cpu); | 1227 | init_debug_store_on_cpu(cpu); |
889 | /* | 1228 | /* |
890 | * Deal with CPUs that don't clear their LBRs on power-up. | 1229 | * Deal with CPUs that don't clear their LBRs on power-up. |
891 | */ | 1230 | */ |
892 | intel_pmu_lbr_reset(); | 1231 | intel_pmu_lbr_reset(); |
1232 | |||
1233 | if (!cpu_has_ht_siblings()) | ||
1234 | return; | ||
1235 | |||
1236 | for_each_cpu(i, topology_thread_cpumask(cpu)) { | ||
1237 | struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; | ||
1238 | |||
1239 | if (pc && pc->core_id == core_id) { | ||
1240 | kfree(cpuc->per_core); | ||
1241 | cpuc->per_core = pc; | ||
1242 | break; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | cpuc->per_core->core_id = core_id; | ||
1247 | cpuc->per_core->refcnt++; | ||
893 | } | 1248 | } |
894 | 1249 | ||
895 | static void intel_pmu_cpu_dying(int cpu) | 1250 | static void intel_pmu_cpu_dying(int cpu) |
896 | { | 1251 | { |
1252 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1253 | struct intel_percore *pc = cpuc->per_core; | ||
1254 | |||
1255 | if (pc) { | ||
1256 | if (pc->core_id == -1 || --pc->refcnt == 0) | ||
1257 | kfree(pc); | ||
1258 | cpuc->per_core = NULL; | ||
1259 | } | ||
1260 | |||
897 | fini_debug_store_on_cpu(cpu); | 1261 | fini_debug_store_on_cpu(cpu); |
898 | } | 1262 | } |
899 | 1263 | ||
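The new hotplug callbacks give each physical core one intel_percore structure: cpu_prepare allocates one per CPU, cpu_starting lets an HT sibling discover an already-initialized structure for its core_id and adopt it (freeing its own copy), and cpu_dying drops the reference and frees it with the last sibling. A compressed sketch of the adopt-or-keep step, with the per-CPU variables and topology lookups replaced by plain arrays and an assumed two-cores/four-threads layout:

#include <stdio.h>
#include <stdlib.h>

struct percore_sketch {
	int core_id;
	int refcnt;
};

#define NR_CPUS_SKETCH 4
static struct percore_sketch *per_core[NR_CPUS_SKETCH];
/* Assumed topology: CPUs 0/2 share core 0, CPUs 1/3 share core 1. */
static const int core_of[NR_CPUS_SKETCH] = { 0, 1, 0, 1 };

static void cpu_prepare(int cpu)
{
	per_core[cpu] = calloc(1, sizeof(*per_core[cpu]));
	per_core[cpu]->core_id = -1;	/* not yet claimed by a core */
}

static void cpu_starting(int cpu)
{
	for (int i = 0; i < NR_CPUS_SKETCH; i++) {
		if (i != cpu && per_core[i] && per_core[i] != per_core[cpu] &&
		    per_core[i]->core_id == core_of[cpu]) {
			free(per_core[cpu]);	/* adopt the sibling's copy */
			per_core[cpu] = per_core[i];
			break;
		}
	}
	per_core[cpu]->core_id = core_of[cpu];
	per_core[cpu]->refcnt++;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		cpu_prepare(cpu);
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		cpu_starting(cpu);
	printf("core 0 refcnt %d, core 1 refcnt %d\n",
	       per_core[0]->refcnt, per_core[1]->refcnt);	/* prints 2 and 2 */
	return 0;
}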
@@ -918,7 +1282,9 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
918 | */ | 1282 | */ |
919 | .max_period = (1ULL << 31) - 1, | 1283 | .max_period = (1ULL << 31) - 1, |
920 | .get_event_constraints = intel_get_event_constraints, | 1284 | .get_event_constraints = intel_get_event_constraints, |
1285 | .put_event_constraints = intel_put_event_constraints, | ||
921 | 1286 | ||
1287 | .cpu_prepare = intel_pmu_cpu_prepare, | ||
922 | .cpu_starting = intel_pmu_cpu_starting, | 1288 | .cpu_starting = intel_pmu_cpu_starting, |
923 | .cpu_dying = intel_pmu_cpu_dying, | 1289 | .cpu_dying = intel_pmu_cpu_dying, |
924 | }; | 1290 | }; |
@@ -1024,6 +1390,7 @@ static __init int intel_pmu_init(void) | |||
1024 | intel_pmu_lbr_init_core(); | 1390 | intel_pmu_lbr_init_core(); |
1025 | 1391 | ||
1026 | x86_pmu.event_constraints = intel_core2_event_constraints; | 1392 | x86_pmu.event_constraints = intel_core2_event_constraints; |
1393 | x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; | ||
1027 | pr_cont("Core2 events, "); | 1394 | pr_cont("Core2 events, "); |
1028 | break; | 1395 | break; |
1029 | 1396 | ||
@@ -1032,11 +1399,16 @@ static __init int intel_pmu_init(void) | |||
1032 | case 46: /* 45 nm nehalem-ex, "Beckton" */ | 1399 | case 46: /* 45 nm nehalem-ex, "Beckton" */ |
1033 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | 1400 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
1034 | sizeof(hw_cache_event_ids)); | 1401 | sizeof(hw_cache_event_ids)); |
1402 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | ||
1403 | sizeof(hw_cache_extra_regs)); | ||
1035 | 1404 | ||
1036 | intel_pmu_lbr_init_nhm(); | 1405 | intel_pmu_lbr_init_nhm(); |
1037 | 1406 | ||
1038 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | 1407 | x86_pmu.event_constraints = intel_nehalem_event_constraints; |
1408 | x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; | ||
1409 | x86_pmu.percore_constraints = intel_nehalem_percore_constraints; | ||
1039 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1410 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1411 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | ||
1040 | pr_cont("Nehalem events, "); | 1412 | pr_cont("Nehalem events, "); |
1041 | break; | 1413 | break; |
1042 | 1414 | ||
@@ -1047,6 +1419,7 @@ static __init int intel_pmu_init(void) | |||
1047 | intel_pmu_lbr_init_atom(); | 1419 | intel_pmu_lbr_init_atom(); |
1048 | 1420 | ||
1049 | x86_pmu.event_constraints = intel_gen_event_constraints; | 1421 | x86_pmu.event_constraints = intel_gen_event_constraints; |
1422 | x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; | ||
1050 | pr_cont("Atom events, "); | 1423 | pr_cont("Atom events, "); |
1051 | break; | 1424 | break; |
1052 | 1425 | ||
@@ -1054,14 +1427,30 @@ static __init int intel_pmu_init(void) | |||
1054 | case 44: /* 32 nm nehalem, "Gulftown" */ | 1427 | case 44: /* 32 nm nehalem, "Gulftown" */ |
1055 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, | 1428 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, |
1056 | sizeof(hw_cache_event_ids)); | 1429 | sizeof(hw_cache_event_ids)); |
1430 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | ||
1431 | sizeof(hw_cache_extra_regs)); | ||
1057 | 1432 | ||
1058 | intel_pmu_lbr_init_nhm(); | 1433 | intel_pmu_lbr_init_nhm(); |
1059 | 1434 | ||
1060 | x86_pmu.event_constraints = intel_westmere_event_constraints; | 1435 | x86_pmu.event_constraints = intel_westmere_event_constraints; |
1436 | x86_pmu.percore_constraints = intel_westmere_percore_constraints; | ||
1061 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1437 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1438 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; | ||
1439 | x86_pmu.extra_regs = intel_westmere_extra_regs; | ||
1062 | pr_cont("Westmere events, "); | 1440 | pr_cont("Westmere events, "); |
1063 | break; | 1441 | break; |
1064 | 1442 | ||
1443 | case 42: /* SandyBridge */ | ||
1444 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | ||
1445 | sizeof(hw_cache_event_ids)); | ||
1446 | |||
1447 | intel_pmu_lbr_init_nhm(); | ||
1448 | |||
1449 | x86_pmu.event_constraints = intel_snb_event_constraints; | ||
1450 | x86_pmu.pebs_constraints = intel_snb_pebs_events; | ||
1451 | pr_cont("SandyBridge events, "); | ||
1452 | break; | ||
1453 | |||
1065 | default: | 1454 | default: |
1066 | /* | 1455 | /* |
1067 | * default constraints for v2 and up | 1456 | * default constraints for v2 and up |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b7dcd9f2b8a0..bab491b8ee25 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -361,30 +361,70 @@ static int intel_pmu_drain_bts_buffer(void) | |||
361 | /* | 361 | /* |
362 | * PEBS | 362 | * PEBS |
363 | */ | 363 | */ |
364 | static struct event_constraint intel_core2_pebs_event_constraints[] = { | ||
365 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ | ||
366 | INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ | ||
367 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ | ||
368 | INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ | ||
369 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | ||
370 | EVENT_CONSTRAINT_END | ||
371 | }; | ||
372 | |||
373 | static struct event_constraint intel_atom_pebs_event_constraints[] = { | ||
374 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ | ||
375 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ | ||
376 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | ||
377 | EVENT_CONSTRAINT_END | ||
378 | }; | ||
364 | 379 | ||
365 | static struct event_constraint intel_core_pebs_events[] = { | 380 | static struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
366 | PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */ | 381 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ |
367 | PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ | 382 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
368 | PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ | 383 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
369 | PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ | 384 | INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ |
370 | PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */ | 385 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
371 | PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ | 386 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
372 | PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */ | 387 | INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ |
373 | PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ | 388 | INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ |
374 | PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */ | 389 | INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
390 | INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | ||
391 | INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | ||
375 | EVENT_CONSTRAINT_END | 392 | EVENT_CONSTRAINT_END |
376 | }; | 393 | }; |
377 | 394 | ||
378 | static struct event_constraint intel_nehalem_pebs_events[] = { | 395 | static struct event_constraint intel_westmere_pebs_event_constraints[] = { |
379 | PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */ | 396 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ |
380 | PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */ | 397 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
381 | PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */ | 398 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
382 | PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */ | 399 | INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ |
383 | PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */ | 400 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
384 | PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ | 401 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
385 | PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */ | 402 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ |
386 | PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ | 403 | INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ |
387 | PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */ | 404 | INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
405 | INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | ||
406 | INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | ||
407 | EVENT_CONSTRAINT_END | ||
408 | }; | ||
409 | |||
410 | static struct event_constraint intel_snb_pebs_events[] = { | ||
411 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | ||
412 | INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | ||
413 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | ||
414 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | ||
415 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | ||
416 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ | ||
417 | INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ | ||
418 | INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ | ||
419 | INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ | ||
420 | INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ | ||
421 | INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ | ||
422 | INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ | ||
423 | INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ | ||
424 | INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ | ||
425 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | ||
426 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | ||
427 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ | ||
388 | EVENT_CONSTRAINT_END | 428 | EVENT_CONSTRAINT_END |
389 | }; | 429 | }; |
390 | 430 | ||
@@ -695,20 +735,17 @@ static void intel_ds_init(void) | |||
695 | printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); | 735 | printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); |
696 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); | 736 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); |
697 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; | 737 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; |
698 | x86_pmu.pebs_constraints = intel_core_pebs_events; | ||
699 | break; | 738 | break; |
700 | 739 | ||
701 | case 1: | 740 | case 1: |
702 | printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); | 741 | printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); |
703 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); | 742 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); |
704 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | 743 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
705 | x86_pmu.pebs_constraints = intel_nehalem_pebs_events; | ||
706 | break; | 744 | break; |
707 | 745 | ||
708 | default: | 746 | default: |
709 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); | 747 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); |
710 | x86_pmu.pebs = 0; | 748 | x86_pmu.pebs = 0; |
711 | break; | ||
712 | } | 749 | } |
713 | } | 750 | } |
714 | } | 751 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ff751a9f182b..0811f5ebfba6 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Netburst Perfomance Events (P4, old Xeon) | 2 | * Netburst Performance Events (P4, old Xeon) |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org> | 4 | * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org> |
5 | * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com> | 5 | * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com> |
@@ -679,7 +679,7 @@ static int p4_validate_raw_event(struct perf_event *event) | |||
679 | */ | 679 | */ |
680 | 680 | ||
681 | /* | 681 | /* |
682 | * if an event is shared accross the logical threads | 682 | * if an event is shared across the logical threads |
683 | * the user needs special permissions to be able to use it | 683 | * the user needs special permissions to be able to use it |
684 | */ | 684 | */ |
685 | if (p4_ht_active() && p4_event_bind_map[v].shared) { | 685 | if (p4_ht_active() && p4_event_bind_map[v].shared) { |
@@ -764,9 +764,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) | |||
764 | u64 v; | 764 | u64 v; |
765 | 765 | ||
766 | /* an official way for overflow indication */ | 766 | /* an official way for overflow indication */ |
767 | rdmsrl(hwc->config_base + hwc->idx, v); | 767 | rdmsrl(hwc->config_base, v); |
768 | if (v & P4_CCCR_OVF) { | 768 | if (v & P4_CCCR_OVF) { |
769 | wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF); | 769 | wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); |
770 | return 1; | 770 | return 1; |
771 | } | 771 | } |
772 | 772 | ||
@@ -790,13 +790,13 @@ static void p4_pmu_disable_pebs(void) | |||
790 | * | 790 | * |
791 | * It's still allowed that two threads setup same cache | 791 | * It's still allowed that two threads setup same cache |
792 | * events so we can't simply clear metrics until we knew | 792 | * events so we can't simply clear metrics until we knew |
793 | * noone is depending on us, so we need kind of counter | 793 | * no one is depending on us, so we need kind of counter |
794 | * for "ReplayEvent" users. | 794 | * for "ReplayEvent" users. |
795 | * | 795 | * |
796 | * What is more complex -- RAW events, if user (for some | 796 | * What is more complex -- RAW events, if user (for some |
797 | * reason) will pass some cache event metric with improper | 797 | * reason) will pass some cache event metric with improper |
798 | * event opcode -- it's fine from hardware point of view | 798 | * event opcode -- it's fine from hardware point of view |
799 | * but completely nonsence from "meaning" of such action. | 799 | * but completely nonsense from "meaning" of such action. |
800 | * | 800 | * |
801 | * So at moment let leave metrics turned on forever -- it's | 801 | * So at moment let leave metrics turned on forever -- it's |
802 | * ok for now but need to be revisited! | 802 | * ok for now but need to be revisited! |
@@ -815,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) | |||
815 | * state we need to clear P4_CCCR_OVF, otherwise interrupt get | 815 | * state we need to clear P4_CCCR_OVF, otherwise interrupt get |
816 | * asserted again and again | 816 | * asserted again and again |
817 | */ | 817 | */ |
818 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, | 818 | (void)checking_wrmsrl(hwc->config_base, |
819 | (u64)(p4_config_unpack_cccr(hwc->config)) & | 819 | (u64)(p4_config_unpack_cccr(hwc->config)) & |
820 | ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); | 820 | ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); |
821 | } | 821 | } |
@@ -885,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event) | |||
885 | p4_pmu_enable_pebs(hwc->config); | 885 | p4_pmu_enable_pebs(hwc->config); |
886 | 886 | ||
887 | (void)checking_wrmsrl(escr_addr, escr_conf); | 887 | (void)checking_wrmsrl(escr_addr, escr_conf); |
888 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, | 888 | (void)checking_wrmsrl(hwc->config_base, |
889 | (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); | 889 | (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); |
890 | } | 890 | } |
891 | 891 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 34ba07be2cda..20c097e33860 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c | |||
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event) | |||
68 | if (cpuc->enabled) | 68 | if (cpuc->enabled) |
69 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | 69 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
70 | 70 | ||
71 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); | 71 | (void)checking_wrmsrl(hwc->config_base, val); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void p6_pmu_enable_event(struct perf_event *event) | 74 | static void p6_pmu_enable_event(struct perf_event *event) |
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event) | |||
81 | if (cpuc->enabled) | 81 | if (cpuc->enabled) |
82 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | 82 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
83 | 83 | ||
84 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); | 84 | (void)checking_wrmsrl(hwc->config_base, val); |
85 | } | 85 | } |
86 | 86 | ||
87 | static __initconst const struct x86_pmu p6_pmu = { | 87 | static __initconst const struct x86_pmu p6_pmu = { |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index d5a236615501..966512b2cacf 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
46 | /* returns the bit offset of the performance counter register */ | 46 | /* returns the bit offset of the performance counter register */ |
47 | switch (boot_cpu_data.x86_vendor) { | 47 | switch (boot_cpu_data.x86_vendor) { |
48 | case X86_VENDOR_AMD: | 48 | case X86_VENDOR_AMD: |
49 | if (msr >= MSR_F15H_PERF_CTR) | ||
50 | return (msr - MSR_F15H_PERF_CTR) >> 1; | ||
49 | return msr - MSR_K7_PERFCTR0; | 51 | return msr - MSR_K7_PERFCTR0; |
50 | case X86_VENDOR_INTEL: | 52 | case X86_VENDOR_INTEL: |
51 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 53 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
@@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
70 | /* returns the bit offset of the event selection register */ | 72 | /* returns the bit offset of the event selection register */ |
71 | switch (boot_cpu_data.x86_vendor) { | 73 | switch (boot_cpu_data.x86_vendor) { |
72 | case X86_VENDOR_AMD: | 74 | case X86_VENDOR_AMD: |
75 | if (msr >= MSR_F15H_PERF_CTL) | ||
76 | return (msr - MSR_F15H_PERF_CTL) >> 1; | ||
73 | return msr - MSR_K7_EVNTSEL0; | 77 | return msr - MSR_K7_EVNTSEL0; |
74 | case X86_VENDOR_INTEL: | 78 | case X86_VENDOR_INTEL: |
75 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 79 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
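The two hunks above teach the perfctr watchdog about AMD Family 15h, where the PERF_CTLx/PERF_CTRx MSRs are interleaved, so two consecutive MSR addresses map to a single counter index, hence the shift by one. A minimal userspace sketch of that mapping follows; the MSR constants are the conventional msr-index.h addresses, quoted here only for illustration and not taken from this diff.

/*
 * Counter-number mapping sketch: K7-style counters use consecutive MSRs,
 * Family 15h counters use interleaved CTL/CTR pairs.
 */
#include <stdio.h>

#define MSR_K7_PERFCTR0   0xc0010004u
#define MSR_F15H_PERF_CTR 0xc0010201u

static unsigned int amd_perfctr_msr_to_bit(unsigned int msr)
{
        if (msr >= MSR_F15H_PERF_CTR)
                return (msr - MSR_F15H_PERF_CTR) >> 1;   /* CTR MSRs are two apart */
        return msr - MSR_K7_PERFCTR0;                    /* one MSR per counter */
}

int main(void)
{
        printf("%u\n", amd_perfctr_msr_to_bit(MSR_K7_PERFCTR0 + 3));    /* prints 3 */
        printf("%u\n", amd_perfctr_msr_to_bit(MSR_F15H_PERF_CTR + 2));  /* prints 1 */
        return 0;
}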
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 227b0448960d..d22d0c4edcfd 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -86,7 +86,7 @@ static void __init vmware_platform_setup(void) | |||
86 | } | 86 | } |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * While checking the dmi string infomation, just checking the product | 89 | * While checking the dmi string information, just checking the product |
90 | * serial key should be enough, as this will always have a VMware | 90 | * serial key should be enough, as this will always have a VMware |
91 | * specific string when running under VMware hypervisor. | 91 | * specific string when running under VMware hypervisor. |
92 | */ | 92 | */ |
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c new file mode 100644 index 000000000000..7a8cebc9ff29 --- /dev/null +++ b/arch/x86/kernel/devicetree.c | |||
@@ -0,0 +1,441 @@ | |||
1 | /* | ||
2 | * Architecture specific OF callbacks. | ||
3 | */ | ||
4 | #include <linux/bootmem.h> | ||
5 | #include <linux/io.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/of.h> | ||
9 | #include <linux/of_fdt.h> | ||
10 | #include <linux/of_address.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | #include <linux/of_irq.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/of_pci.h> | ||
16 | |||
17 | #include <asm/hpet.h> | ||
18 | #include <asm/irq_controller.h> | ||
19 | #include <asm/apic.h> | ||
20 | #include <asm/pci_x86.h> | ||
21 | |||
22 | __initdata u64 initial_dtb; | ||
23 | char __initdata cmd_line[COMMAND_LINE_SIZE]; | ||
24 | static LIST_HEAD(irq_domains); | ||
25 | static DEFINE_RAW_SPINLOCK(big_irq_lock); | ||
26 | |||
27 | int __initdata of_ioapic; | ||
28 | |||
29 | #ifdef CONFIG_X86_IO_APIC | ||
30 | static void add_interrupt_host(struct irq_domain *ih) | ||
31 | { | ||
32 | unsigned long flags; | ||
33 | |||
34 | raw_spin_lock_irqsave(&big_irq_lock, flags); | ||
35 | list_add(&ih->l, &irq_domains); | ||
36 | raw_spin_unlock_irqrestore(&big_irq_lock, flags); | ||
37 | } | ||
38 | #endif | ||
39 | |||
40 | static struct irq_domain *get_ih_from_node(struct device_node *controller) | ||
41 | { | ||
42 | struct irq_domain *ih, *found = NULL; | ||
43 | unsigned long flags; | ||
44 | |||
45 | raw_spin_lock_irqsave(&big_irq_lock, flags); | ||
46 | list_for_each_entry(ih, &irq_domains, l) { | ||
47 | if (ih->controller == controller) { | ||
48 | found = ih; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | raw_spin_unlock_irqrestore(&big_irq_lock, flags); | ||
53 | return found; | ||
54 | } | ||
55 | |||
56 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
57 | const u32 *intspec, unsigned int intsize) | ||
58 | { | ||
59 | struct irq_domain *ih; | ||
60 | u32 virq, type; | ||
61 | int ret; | ||
62 | |||
63 | ih = get_ih_from_node(controller); | ||
64 | if (!ih) | ||
65 | return 0; | ||
66 | ret = ih->xlate(ih, intspec, intsize, &virq, &type); | ||
67 | if (ret) | ||
68 | return ret; | ||
69 | if (type == IRQ_TYPE_NONE) | ||
70 | return virq; | ||
71 | /* set the mask if it is different from current */ | ||
72 | if (type == (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) | ||
73 | set_irq_type(virq, type); | ||
74 | return virq; | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
77 | |||
78 | unsigned long pci_address_to_pio(phys_addr_t address) | ||
79 | { | ||
80 | /* | ||
81 | * The ioport address can be directly used by inX / outX | ||
82 | */ | ||
83 | BUG_ON(address >= (1 << 16)); | ||
84 | return (unsigned long)address; | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(pci_address_to_pio); | ||
87 | |||
88 | void __init early_init_dt_scan_chosen_arch(unsigned long node) | ||
89 | { | ||
90 | BUG(); | ||
91 | } | ||
92 | |||
93 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | ||
94 | { | ||
95 | BUG(); | ||
96 | } | ||
97 | |||
98 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | ||
99 | { | ||
100 | return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); | ||
101 | } | ||
102 | |||
103 | void __init add_dtb(u64 data) | ||
104 | { | ||
105 | initial_dtb = data + offsetof(struct setup_data, data); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * CE4100 ids. Will be moved to machine_device_initcall() once we have it. | ||
110 | */ | ||
111 | static struct of_device_id __initdata ce4100_ids[] = { | ||
112 | { .compatible = "intel,ce4100-cp", }, | ||
113 | { .compatible = "isa", }, | ||
114 | { .compatible = "pci", }, | ||
115 | {}, | ||
116 | }; | ||
117 | |||
118 | static int __init add_bus_probe(void) | ||
119 | { | ||
120 | if (!of_have_populated_dt()) | ||
121 | return 0; | ||
122 | |||
123 | return of_platform_bus_probe(NULL, ce4100_ids, NULL); | ||
124 | } | ||
125 | module_init(add_bus_probe); | ||
126 | |||
127 | #ifdef CONFIG_PCI | ||
128 | static int x86_of_pci_irq_enable(struct pci_dev *dev) | ||
129 | { | ||
130 | struct of_irq oirq; | ||
131 | u32 virq; | ||
132 | int ret; | ||
133 | u8 pin; | ||
134 | |||
135 | ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | ||
136 | if (ret) | ||
137 | return ret; | ||
138 | if (!pin) | ||
139 | return 0; | ||
140 | |||
141 | ret = of_irq_map_pci(dev, &oirq); | ||
142 | if (ret) | ||
143 | return ret; | ||
144 | |||
145 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
146 | oirq.size); | ||
147 | if (virq == 0) | ||
148 | return -EINVAL; | ||
149 | dev->irq = virq; | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static void x86_of_pci_irq_disable(struct pci_dev *dev) | ||
154 | { | ||
155 | } | ||
156 | |||
157 | void __cpuinit x86_of_pci_init(void) | ||
158 | { | ||
159 | struct device_node *np; | ||
160 | |||
161 | pcibios_enable_irq = x86_of_pci_irq_enable; | ||
162 | pcibios_disable_irq = x86_of_pci_irq_disable; | ||
163 | |||
164 | for_each_node_by_type(np, "pci") { | ||
165 | const void *prop; | ||
166 | struct pci_bus *bus; | ||
167 | unsigned int bus_min; | ||
168 | struct device_node *child; | ||
169 | |||
170 | prop = of_get_property(np, "bus-range", NULL); | ||
171 | if (!prop) | ||
172 | continue; | ||
173 | bus_min = be32_to_cpup(prop); | ||
174 | |||
175 | bus = pci_find_bus(0, bus_min); | ||
176 | if (!bus) { | ||
177 | printk(KERN_ERR "Can't find a node for bus %s.\n", | ||
178 | np->full_name); | ||
179 | continue; | ||
180 | } | ||
181 | |||
182 | if (bus->self) | ||
183 | bus->self->dev.of_node = np; | ||
184 | else | ||
185 | bus->dev.of_node = np; | ||
186 | |||
187 | for_each_child_of_node(np, child) { | ||
188 | struct pci_dev *dev; | ||
189 | u32 devfn; | ||
190 | |||
191 | prop = of_get_property(child, "reg", NULL); | ||
192 | if (!prop) | ||
193 | continue; | ||
194 | |||
195 | devfn = (be32_to_cpup(prop) >> 8) & 0xff; | ||
196 | dev = pci_get_slot(bus, devfn); | ||
197 | if (!dev) | ||
198 | continue; | ||
199 | dev->dev.of_node = child; | ||
200 | pci_dev_put(dev); | ||
201 | } | ||
202 | } | ||
203 | } | ||
204 | #endif | ||
205 | |||
206 | static void __init dtb_setup_hpet(void) | ||
207 | { | ||
208 | #ifdef CONFIG_HPET_TIMER | ||
209 | struct device_node *dn; | ||
210 | struct resource r; | ||
211 | int ret; | ||
212 | |||
213 | dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet"); | ||
214 | if (!dn) | ||
215 | return; | ||
216 | ret = of_address_to_resource(dn, 0, &r); | ||
217 | if (ret) { | ||
218 | WARN_ON(1); | ||
219 | return; | ||
220 | } | ||
221 | hpet_address = r.start; | ||
222 | #endif | ||
223 | } | ||
224 | |||
225 | static void __init dtb_lapic_setup(void) | ||
226 | { | ||
227 | #ifdef CONFIG_X86_LOCAL_APIC | ||
228 | struct device_node *dn; | ||
229 | struct resource r; | ||
230 | int ret; | ||
231 | |||
232 | dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic"); | ||
233 | if (!dn) | ||
234 | return; | ||
235 | |||
236 | ret = of_address_to_resource(dn, 0, &r); | ||
237 | if (WARN_ON(ret)) | ||
238 | return; | ||
239 | |||
240 | /* Did the boot loader setup the local APIC ? */ | ||
241 | if (!cpu_has_apic) { | ||
242 | if (apic_force_enable(r.start)) | ||
243 | return; | ||
244 | } | ||
245 | smp_found_config = 1; | ||
246 | pic_mode = 1; | ||
247 | register_lapic_address(r.start); | ||
248 | generic_processor_info(boot_cpu_physical_apicid, | ||
249 | GET_APIC_VERSION(apic_read(APIC_LVR))); | ||
250 | #endif | ||
251 | } | ||
252 | |||
253 | #ifdef CONFIG_X86_IO_APIC | ||
254 | static unsigned int ioapic_id; | ||
255 | |||
256 | static void __init dtb_add_ioapic(struct device_node *dn) | ||
257 | { | ||
258 | struct resource r; | ||
259 | int ret; | ||
260 | |||
261 | ret = of_address_to_resource(dn, 0, &r); | ||
262 | if (ret) { | ||
263 | printk(KERN_ERR "Can't obtain address from node %s.\n", | ||
264 | dn->full_name); | ||
265 | return; | ||
266 | } | ||
267 | mp_register_ioapic(++ioapic_id, r.start, gsi_top); | ||
268 | } | ||
269 | |||
270 | static void __init dtb_ioapic_setup(void) | ||
271 | { | ||
272 | struct device_node *dn; | ||
273 | |||
274 | for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic") | ||
275 | dtb_add_ioapic(dn); | ||
276 | |||
277 | if (nr_ioapics) { | ||
278 | of_ioapic = 1; | ||
279 | return; | ||
280 | } | ||
281 | printk(KERN_ERR "Error: No information about IO-APIC in OF.\n"); | ||
282 | } | ||
283 | #else | ||
284 | static void __init dtb_ioapic_setup(void) {} | ||
285 | #endif | ||
286 | |||
287 | static void __init dtb_apic_setup(void) | ||
288 | { | ||
289 | dtb_lapic_setup(); | ||
290 | dtb_ioapic_setup(); | ||
291 | } | ||
292 | |||
293 | #ifdef CONFIG_OF_FLATTREE | ||
294 | static void __init x86_flattree_get_config(void) | ||
295 | { | ||
296 | u32 size, map_len; | ||
297 | void *new_dtb; | ||
298 | |||
299 | if (!initial_dtb) | ||
300 | return; | ||
301 | |||
302 | map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), | ||
303 | (u64)sizeof(struct boot_param_header)); | ||
304 | |||
305 | initial_boot_params = early_memremap(initial_dtb, map_len); | ||
306 | size = be32_to_cpu(initial_boot_params->totalsize); | ||
307 | if (map_len < size) { | ||
308 | early_iounmap(initial_boot_params, map_len); | ||
309 | initial_boot_params = early_memremap(initial_dtb, size); | ||
310 | map_len = size; | ||
311 | } | ||
312 | |||
313 | new_dtb = alloc_bootmem(size); | ||
314 | memcpy(new_dtb, initial_boot_params, size); | ||
315 | early_iounmap(initial_boot_params, map_len); | ||
316 | |||
317 | initial_boot_params = new_dtb; | ||
318 | |||
319 | /* root level address cells */ | ||
320 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | ||
321 | |||
322 | unflatten_device_tree(); | ||
323 | } | ||
324 | #else | ||
325 | static inline void x86_flattree_get_config(void) { } | ||
326 | #endif | ||
327 | |||
328 | void __init x86_dtb_init(void) | ||
329 | { | ||
330 | x86_flattree_get_config(); | ||
331 | |||
332 | if (!of_have_populated_dt()) | ||
333 | return; | ||
334 | |||
335 | dtb_setup_hpet(); | ||
336 | dtb_apic_setup(); | ||
337 | } | ||
338 | |||
339 | #ifdef CONFIG_X86_IO_APIC | ||
340 | |||
341 | struct of_ioapic_type { | ||
342 | u32 out_type; | ||
343 | u32 trigger; | ||
344 | u32 polarity; | ||
345 | }; | ||
346 | |||
347 | static struct of_ioapic_type of_ioapic_type[] = | ||
348 | { | ||
349 | { | ||
350 | .out_type = IRQ_TYPE_EDGE_RISING, | ||
351 | .trigger = IOAPIC_EDGE, | ||
352 | .polarity = 1, | ||
353 | }, | ||
354 | { | ||
355 | .out_type = IRQ_TYPE_LEVEL_LOW, | ||
356 | .trigger = IOAPIC_LEVEL, | ||
357 | .polarity = 0, | ||
358 | }, | ||
359 | { | ||
360 | .out_type = IRQ_TYPE_LEVEL_HIGH, | ||
361 | .trigger = IOAPIC_LEVEL, | ||
362 | .polarity = 1, | ||
363 | }, | ||
364 | { | ||
365 | .out_type = IRQ_TYPE_EDGE_FALLING, | ||
366 | .trigger = IOAPIC_EDGE, | ||
367 | .polarity = 0, | ||
368 | }, | ||
369 | }; | ||
370 | |||
371 | static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, | ||
372 | u32 *out_hwirq, u32 *out_type) | ||
373 | { | ||
374 | struct io_apic_irq_attr attr; | ||
375 | struct of_ioapic_type *it; | ||
376 | u32 line, idx, type; | ||
377 | |||
378 | if (intsize < 2) | ||
379 | return -EINVAL; | ||
380 | |||
381 | line = *intspec; | ||
382 | idx = (u32) id->priv; | ||
383 | *out_hwirq = line + mp_gsi_routing[idx].gsi_base; | ||
384 | |||
385 | intspec++; | ||
386 | type = *intspec; | ||
387 | |||
388 | if (type >= ARRAY_SIZE(of_ioapic_type)) | ||
389 | return -EINVAL; | ||
390 | |||
391 | it = of_ioapic_type + type; | ||
392 | *out_type = it->out_type; | ||
393 | |||
394 | set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); | ||
395 | |||
396 | return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); | ||
397 | } | ||
398 | |||
399 | static void __init ioapic_add_ofnode(struct device_node *np) | ||
400 | { | ||
401 | struct resource r; | ||
402 | int i, ret; | ||
403 | |||
404 | ret = of_address_to_resource(np, 0, &r); | ||
405 | if (ret) { | ||
406 | printk(KERN_ERR "Failed to obtain address for %s\n", | ||
407 | np->full_name); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | for (i = 0; i < nr_ioapics; i++) { | ||
412 | if (r.start == mp_ioapics[i].apicaddr) { | ||
413 | struct irq_domain *id; | ||
414 | |||
415 | id = kzalloc(sizeof(*id), GFP_KERNEL); | ||
416 | BUG_ON(!id); | ||
417 | id->controller = np; | ||
418 | id->xlate = ioapic_xlate; | ||
419 | id->priv = (void *)i; | ||
420 | add_interrupt_host(id); | ||
421 | return; | ||
422 | } | ||
423 | } | ||
424 | printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name); | ||
425 | } | ||
426 | |||
427 | void __init x86_add_irq_domains(void) | ||
428 | { | ||
429 | struct device_node *dp; | ||
430 | |||
431 | if (!of_have_populated_dt()) | ||
432 | return; | ||
433 | |||
434 | for_each_node_with_property(dp, "interrupt-controller") { | ||
435 | if (of_device_is_compatible(dp, "intel,ce4100-ioapic")) | ||
436 | ioapic_add_ofnode(dp); | ||
437 | } | ||
438 | } | ||
439 | #else | ||
440 | void __init x86_add_irq_domains(void) { } | ||
441 | #endif | ||
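ioapic_xlate() in the new devicetree.c consumes a two-cell interrupt specifier: cell 0 is the I/O APIC input line, cell 1 indexes the trigger/polarity table. Below is a hedged, self-contained sketch of that lookup; the IRQ_TYPE_* and IOAPIC_* values are local stand-ins for illustration, not the kernel headers.

#include <stdio.h>

#define IRQ_TYPE_EDGE_RISING  1
#define IRQ_TYPE_EDGE_FALLING 2
#define IRQ_TYPE_LEVEL_HIGH   4
#define IRQ_TYPE_LEVEL_LOW    8
#define IOAPIC_EDGE  0
#define IOAPIC_LEVEL 1

struct of_ioapic_type {
        unsigned int out_type;
        unsigned int trigger;
        unsigned int polarity;
};

static const struct of_ioapic_type of_ioapic_type[] = {
        { IRQ_TYPE_EDGE_RISING,  IOAPIC_EDGE,  1 },
        { IRQ_TYPE_LEVEL_LOW,    IOAPIC_LEVEL, 0 },
        { IRQ_TYPE_LEVEL_HIGH,   IOAPIC_LEVEL, 1 },
        { IRQ_TYPE_EDGE_FALLING, IOAPIC_EDGE,  0 },
};

/* Translate a (line, type index) specifier the way ioapic_xlate() does. */
static int xlate(const unsigned int *intspec, unsigned int intsize,
                 unsigned int gsi_base, unsigned int *out_hwirq,
                 unsigned int *out_type)
{
        if (intsize < 2 ||
            intspec[1] >= sizeof(of_ioapic_type) / sizeof(of_ioapic_type[0]))
                return -1;
        *out_hwirq = intspec[0] + gsi_base;     /* line + GSI base of that I/O APIC */
        *out_type  = of_ioapic_type[intspec[1]].out_type;
        return 0;
}

int main(void)
{
        unsigned int spec[2] = { 4, 2 };        /* line 4, "level high" entry */
        unsigned int hwirq, type;

        if (!xlate(spec, 2, 0, &hwirq, &type))
                printf("hwirq=%u type=%u\n", hwirq, type);
        return 0;
}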
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index df20723a6a1b..81ac6c78c01c 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -175,21 +175,21 @@ static const struct stacktrace_ops print_trace_ops = { | |||
175 | 175 | ||
176 | void | 176 | void |
177 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 177 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
178 | unsigned long *stack, char *log_lvl) | 178 | unsigned long *stack, unsigned long bp, char *log_lvl) |
179 | { | 179 | { |
180 | printk("%sCall Trace:\n", log_lvl); | 180 | printk("%sCall Trace:\n", log_lvl); |
181 | dump_trace(task, regs, stack, &print_trace_ops, log_lvl); | 181 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
182 | } | 182 | } |
183 | 183 | ||
184 | void show_trace(struct task_struct *task, struct pt_regs *regs, | 184 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
185 | unsigned long *stack) | 185 | unsigned long *stack, unsigned long bp) |
186 | { | 186 | { |
187 | show_trace_log_lvl(task, regs, stack, ""); | 187 | show_trace_log_lvl(task, regs, stack, bp, ""); |
188 | } | 188 | } |
189 | 189 | ||
190 | void show_stack(struct task_struct *task, unsigned long *sp) | 190 | void show_stack(struct task_struct *task, unsigned long *sp) |
191 | { | 191 | { |
192 | show_stack_log_lvl(task, NULL, sp, ""); | 192 | show_stack_log_lvl(task, NULL, sp, 0, ""); |
193 | } | 193 | } |
194 | 194 | ||
195 | /* | 195 | /* |
@@ -197,14 +197,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
197 | */ | 197 | */ |
198 | void dump_stack(void) | 198 | void dump_stack(void) |
199 | { | 199 | { |
200 | unsigned long bp; | ||
200 | unsigned long stack; | 201 | unsigned long stack; |
201 | 202 | ||
203 | bp = stack_frame(current, NULL); | ||
202 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 204 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
203 | current->pid, current->comm, print_tainted(), | 205 | current->pid, current->comm, print_tainted(), |
204 | init_utsname()->release, | 206 | init_utsname()->release, |
205 | (int)strcspn(init_utsname()->version, " "), | 207 | (int)strcspn(init_utsname()->version, " "), |
206 | init_utsname()->version); | 208 | init_utsname()->version); |
207 | show_trace(NULL, NULL, &stack); | 209 | show_trace(NULL, NULL, &stack, bp); |
208 | } | 210 | } |
209 | EXPORT_SYMBOL(dump_stack); | 211 | EXPORT_SYMBOL(dump_stack); |
210 | 212 | ||
@@ -320,41 +322,6 @@ void die(const char *str, struct pt_regs *regs, long err) | |||
320 | oops_end(flags, regs, sig); | 322 | oops_end(flags, regs, sig); |
321 | } | 323 | } |
322 | 324 | ||
323 | void notrace __kprobes | ||
324 | die_nmi(char *str, struct pt_regs *regs, int do_panic) | ||
325 | { | ||
326 | unsigned long flags; | ||
327 | |||
328 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) | ||
329 | return; | ||
330 | |||
331 | /* | ||
332 | * We are in trouble anyway, lets at least try | ||
333 | * to get a message out. | ||
334 | */ | ||
335 | flags = oops_begin(); | ||
336 | printk(KERN_EMERG "%s", str); | ||
337 | printk(" on CPU%d, ip %08lx, registers:\n", | ||
338 | smp_processor_id(), regs->ip); | ||
339 | show_registers(regs); | ||
340 | oops_end(flags, regs, 0); | ||
341 | if (do_panic || panic_on_oops) | ||
342 | panic("Non maskable interrupt"); | ||
343 | nmi_exit(); | ||
344 | local_irq_enable(); | ||
345 | do_exit(SIGBUS); | ||
346 | } | ||
347 | |||
348 | static int __init oops_setup(char *s) | ||
349 | { | ||
350 | if (!s) | ||
351 | return -EINVAL; | ||
352 | if (!strcmp(s, "panic")) | ||
353 | panic_on_oops = 1; | ||
354 | return 0; | ||
355 | } | ||
356 | early_param("oops", oops_setup); | ||
357 | |||
358 | static int __init kstack_setup(char *s) | 325 | static int __init kstack_setup(char *s) |
359 | { | 326 | { |
360 | if (!s) | 327 | if (!s) |
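The dumpstack changes thread an explicit frame pointer through dump_trace() and the show_*() helpers; a bp of 0 still means "derive it from the task". A toy sketch of that calling convention, with stack_frame() reduced to a placeholder rather than the kernel helper:

#include <stdio.h>

/* Placeholder: the real stack_frame() fetches the saved frame pointer. */
static unsigned long stack_frame(void *task, void *regs)
{
        unsigned long bp;

        (void)task;
        (void)regs;
        bp = (unsigned long)&bp;        /* stand-in value for illustration */
        return bp;
}

static void dump_trace(void *task, void *regs, unsigned long *stack,
                       unsigned long bp)
{
        (void)stack;
        if (!bp)                        /* 0 == "figure it out yourself" */
                bp = stack_frame(task, regs);
        printf("walking frames from bp=%#lx\n", bp);
}

int main(void)
{
        unsigned long stack;

        dump_trace(NULL, NULL, &stack, 0);                        /* old behaviour */
        dump_trace(NULL, NULL, &stack, stack_frame(NULL, NULL));  /* caller-supplied bp */
        return 0;
}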
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 74cc1eda384b..3b97a80ce329 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
@@ -17,12 +17,11 @@ | |||
17 | #include <asm/stacktrace.h> | 17 | #include <asm/stacktrace.h> |
18 | 18 | ||
19 | 19 | ||
20 | void dump_trace(struct task_struct *task, | 20 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
21 | struct pt_regs *regs, unsigned long *stack, | 21 | unsigned long *stack, unsigned long bp, |
22 | const struct stacktrace_ops *ops, void *data) | 22 | const struct stacktrace_ops *ops, void *data) |
23 | { | 23 | { |
24 | int graph = 0; | 24 | int graph = 0; |
25 | unsigned long bp; | ||
26 | 25 | ||
27 | if (!task) | 26 | if (!task) |
28 | task = current; | 27 | task = current; |
@@ -35,7 +34,9 @@ void dump_trace(struct task_struct *task, | |||
35 | stack = (unsigned long *)task->thread.sp; | 34 | stack = (unsigned long *)task->thread.sp; |
36 | } | 35 | } |
37 | 36 | ||
38 | bp = stack_frame(task, regs); | 37 | if (!bp) |
38 | bp = stack_frame(task, regs); | ||
39 | |||
39 | for (;;) { | 40 | for (;;) { |
40 | struct thread_info *context; | 41 | struct thread_info *context; |
41 | 42 | ||
@@ -55,7 +56,7 @@ EXPORT_SYMBOL(dump_trace); | |||
55 | 56 | ||
56 | void | 57 | void |
57 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | 58 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
58 | unsigned long *sp, char *log_lvl) | 59 | unsigned long *sp, unsigned long bp, char *log_lvl) |
59 | { | 60 | { |
60 | unsigned long *stack; | 61 | unsigned long *stack; |
61 | int i; | 62 | int i; |
@@ -77,7 +78,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
77 | touch_nmi_watchdog(); | 78 | touch_nmi_watchdog(); |
78 | } | 79 | } |
79 | printk(KERN_CONT "\n"); | 80 | printk(KERN_CONT "\n"); |
80 | show_trace_log_lvl(task, regs, sp, log_lvl); | 81 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
81 | } | 82 | } |
82 | 83 | ||
83 | 84 | ||
@@ -102,7 +103,7 @@ void show_registers(struct pt_regs *regs) | |||
102 | u8 *ip; | 103 | u8 *ip; |
103 | 104 | ||
104 | printk(KERN_EMERG "Stack:\n"); | 105 | printk(KERN_EMERG "Stack:\n"); |
105 | show_stack_log_lvl(NULL, regs, ®s->sp, KERN_EMERG); | 106 | show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); |
106 | 107 | ||
107 | printk(KERN_EMERG "Code: "); | 108 | printk(KERN_EMERG "Code: "); |
108 | 109 | ||
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index a6b6fcf7f0ae..e71c98d3c0d2 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -139,8 +139,8 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack, | |||
139 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 139 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
140 | */ | 140 | */ |
141 | 141 | ||
142 | void dump_trace(struct task_struct *task, | 142 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
143 | struct pt_regs *regs, unsigned long *stack, | 143 | unsigned long *stack, unsigned long bp, |
144 | const struct stacktrace_ops *ops, void *data) | 144 | const struct stacktrace_ops *ops, void *data) |
145 | { | 145 | { |
146 | const unsigned cpu = get_cpu(); | 146 | const unsigned cpu = get_cpu(); |
@@ -150,7 +150,6 @@ void dump_trace(struct task_struct *task, | |||
150 | struct thread_info *tinfo; | 150 | struct thread_info *tinfo; |
151 | int graph = 0; | 151 | int graph = 0; |
152 | unsigned long dummy; | 152 | unsigned long dummy; |
153 | unsigned long bp; | ||
154 | 153 | ||
155 | if (!task) | 154 | if (!task) |
156 | task = current; | 155 | task = current; |
@@ -161,7 +160,8 @@ void dump_trace(struct task_struct *task, | |||
161 | stack = (unsigned long *)task->thread.sp; | 160 | stack = (unsigned long *)task->thread.sp; |
162 | } | 161 | } |
163 | 162 | ||
164 | bp = stack_frame(task, regs); | 163 | if (!bp) |
164 | bp = stack_frame(task, regs); | ||
165 | /* | 165 | /* |
166 | * Print function call entries in all stacks, starting at the | 166 | * Print function call entries in all stacks, starting at the |
167 | * current stack address. If the stacks consist of nested | 167 | * current stack address. If the stacks consist of nested |
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(dump_trace); | |||
225 | 225 | ||
226 | void | 226 | void |
227 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | 227 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
228 | unsigned long *sp, char *log_lvl) | 228 | unsigned long *sp, unsigned long bp, char *log_lvl) |
229 | { | 229 | { |
230 | unsigned long *irq_stack_end; | 230 | unsigned long *irq_stack_end; |
231 | unsigned long *irq_stack; | 231 | unsigned long *irq_stack; |
@@ -269,7 +269,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
269 | preempt_enable(); | 269 | preempt_enable(); |
270 | 270 | ||
271 | printk(KERN_CONT "\n"); | 271 | printk(KERN_CONT "\n"); |
272 | show_trace_log_lvl(task, regs, sp, log_lvl); | 272 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
273 | } | 273 | } |
274 | 274 | ||
275 | void show_registers(struct pt_regs *regs) | 275 | void show_registers(struct pt_regs *regs) |
@@ -298,7 +298,7 @@ void show_registers(struct pt_regs *regs) | |||
298 | 298 | ||
299 | printk(KERN_EMERG "Stack:\n"); | 299 | printk(KERN_EMERG "Stack:\n"); |
300 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, | 300 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, |
301 | KERN_EMERG); | 301 | 0, KERN_EMERG); |
302 | 302 | ||
303 | printk(KERN_EMERG "Code: "); | 303 | printk(KERN_EMERG "Code: "); |
304 | 304 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 294f26da0c0c..cdf5bfd9d4d5 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -667,21 +667,15 @@ __init void e820_setup_gap(void) | |||
667 | * boot_params.e820_map, others are passed via SETUP_E820_EXT node of | 667 | * boot_params.e820_map, others are passed via SETUP_E820_EXT node of |
668 | * linked list of struct setup_data, which is parsed here. | 668 | * linked list of struct setup_data, which is parsed here. |
669 | */ | 669 | */ |
670 | void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data) | 670 | void __init parse_e820_ext(struct setup_data *sdata) |
671 | { | 671 | { |
672 | u32 map_len; | ||
673 | int entries; | 672 | int entries; |
674 | struct e820entry *extmap; | 673 | struct e820entry *extmap; |
675 | 674 | ||
676 | entries = sdata->len / sizeof(struct e820entry); | 675 | entries = sdata->len / sizeof(struct e820entry); |
677 | map_len = sdata->len + sizeof(struct setup_data); | ||
678 | if (map_len > PAGE_SIZE) | ||
679 | sdata = early_ioremap(pa_data, map_len); | ||
680 | extmap = (struct e820entry *)(sdata->data); | 676 | extmap = (struct e820entry *)(sdata->data); |
681 | __append_e820_map(extmap, entries); | 677 | __append_e820_map(extmap, entries); |
682 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 678 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
683 | if (map_len > PAGE_SIZE) | ||
684 | early_iounmap(sdata, map_len); | ||
685 | printk(KERN_INFO "extended physical RAM map:\n"); | 679 | printk(KERN_INFO "extended physical RAM map:\n"); |
686 | e820_print_map("extended"); | 680 | e820_print_map("extended"); |
687 | } | 681 | } |
@@ -847,15 +841,21 @@ static int __init parse_memopt(char *p) | |||
847 | if (!p) | 841 | if (!p) |
848 | return -EINVAL; | 842 | return -EINVAL; |
849 | 843 | ||
850 | #ifdef CONFIG_X86_32 | ||
851 | if (!strcmp(p, "nopentium")) { | 844 | if (!strcmp(p, "nopentium")) { |
845 | #ifdef CONFIG_X86_32 | ||
852 | setup_clear_cpu_cap(X86_FEATURE_PSE); | 846 | setup_clear_cpu_cap(X86_FEATURE_PSE); |
853 | return 0; | 847 | return 0; |
854 | } | 848 | #else |
849 | printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); | ||
850 | return -EINVAL; | ||
855 | #endif | 851 | #endif |
852 | } | ||
856 | 853 | ||
857 | userdef = 1; | 854 | userdef = 1; |
858 | mem_size = memparse(p, &p); | 855 | mem_size = memparse(p, &p); |
856 | /* don't remove all of memory when handling "mem={invalid}" param */ | ||
857 | if (mem_size == 0) | ||
858 | return -EINVAL; | ||
859 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); | 859 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); |
860 | 860 | ||
861 | return 0; | 861 | return 0; |
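parse_memopt() now rejects a size of zero, so a malformed mem= option can no longer remove all of RAM, and mem=nopentium is only honoured on 32-bit. A rough userspace sketch of that parsing, with memparse() re-implemented here purely for illustration:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Simplified memparse(): number with optional K/M/G suffix. */
static unsigned long long memparse(const char *p, char **end)
{
        unsigned long long v = strtoull(p, end, 0);

        switch (**end) {
        case 'G': case 'g': v <<= 10;   /* fall through */
        case 'M': case 'm': v <<= 10;   /* fall through */
        case 'K': case 'k': v <<= 10; (*end)++; break;
        }
        return v;
}

static int parse_memopt(const char *p)
{
        char *end;
        unsigned long long mem_size;

        if (!p)
                return -1;
        if (!strcmp(p, "nopentium"))
                return -1;              /* only meaningful on 32-bit */

        mem_size = memparse(p, &end);
        if (mem_size == 0)              /* don't remove all memory on "mem=junk" */
                return -1;

        printf("limit RAM to %llu bytes\n", mem_size);
        return 0;
}

int main(void)
{
        parse_memopt("512M");           /* accepted */
        parse_memopt("junk");           /* rejected, RAM untouched */
        return 0;
}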
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9efbdcc56425..3755ef494390 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
159 | if (rev >= 0x40) | 159 | if (rev >= 0x40) |
160 | acpi_fix_pin2_polarity = 1; | 160 | acpi_fix_pin2_polarity = 1; |
161 | 161 | ||
162 | if (rev > 0x13) | 162 | /* |
163 | * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... | ||
164 | * SB700: revisions 0x39, 0x3a, ... | ||
165 | * SB800: revisions 0x40, 0x41, ... | ||
166 | */ | ||
167 | if (rev >= 0x39) | ||
163 | return; | 168 | return; |
164 | 169 | ||
165 | if (acpi_use_timer_override) | 170 | if (acpi_use_timer_override) |
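The quirk above is now keyed on the south-bridge revision: SB600 parts (rev 0x11..0x14) still get the ACPI timer-override workaround, while SB700 (rev 0x39+) and SB800 (rev 0x40+) skip it. A tiny sketch of that classification:

#include <stdio.h>

static const char *sb_quirk_needed(unsigned char rev)
{
        /*
         * SB600: revisions 0x11..0x14  -> apply quirk
         * SB700: revisions 0x39, 0x3a  -> skip
         * SB800: revisions 0x40, 0x41  -> skip (plus pin2 polarity fix)
         */
        return (rev >= 0x39) ? "skip quirk" : "apply quirk";
}

int main(void)
{
        printf("rev 0x13: %s\n", sb_quirk_needed(0x13));
        printf("rev 0x3a: %s\n", sb_quirk_needed(0x3a));
        printf("rev 0x41: %s\n", sb_quirk_needed(0x41));
        return 0;
}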
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c8b4efad7ebb..5c1a91974918 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -65,6 +65,8 @@ | |||
65 | #define sysexit_audit syscall_exit_work | 65 | #define sysexit_audit syscall_exit_work |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | .section .entry.text, "ax" | ||
69 | |||
68 | /* | 70 | /* |
69 | * We use macros for low-level operations which need to be overridden | 71 | * We use macros for low-level operations which need to be overridden |
70 | * for paravirtualization. The following will never clobber any registers: | 72 | * for paravirtualization. The following will never clobber any registers: |
@@ -395,7 +397,7 @@ sysenter_past_esp: | |||
395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | 397 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words |
396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | 398 | * pushed above; +8 corresponds to copy_thread's esp0 setting. |
397 | */ | 399 | */ |
398 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) | 400 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) |
399 | CFI_REL_OFFSET eip, 0 | 401 | CFI_REL_OFFSET eip, 0 |
400 | 402 | ||
401 | pushl_cfi %eax | 403 | pushl_cfi %eax |
@@ -788,7 +790,7 @@ ENDPROC(ptregs_clone) | |||
788 | */ | 790 | */ |
789 | .section .init.rodata,"a" | 791 | .section .init.rodata,"a" |
790 | ENTRY(interrupt) | 792 | ENTRY(interrupt) |
791 | .text | 793 | .section .entry.text, "ax" |
792 | .p2align 5 | 794 | .p2align 5 |
793 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 795 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
794 | ENTRY(irq_entries_start) | 796 | ENTRY(irq_entries_start) |
@@ -807,7 +809,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
807 | .endif | 809 | .endif |
808 | .previous | 810 | .previous |
809 | .long 1b | 811 | .long 1b |
810 | .text | 812 | .section .entry.text, "ax" |
811 | vector=vector+1 | 813 | vector=vector+1 |
812 | .endif | 814 | .endif |
813 | .endr | 815 | .endr |
@@ -1409,11 +1411,10 @@ END(general_protection) | |||
1409 | #ifdef CONFIG_KVM_GUEST | 1411 | #ifdef CONFIG_KVM_GUEST |
1410 | ENTRY(async_page_fault) | 1412 | ENTRY(async_page_fault) |
1411 | RING0_EC_FRAME | 1413 | RING0_EC_FRAME |
1412 | pushl $do_async_page_fault | 1414 | pushl_cfi $do_async_page_fault |
1413 | CFI_ADJUST_CFA_OFFSET 4 | ||
1414 | jmp error_code | 1415 | jmp error_code |
1415 | CFI_ENDPROC | 1416 | CFI_ENDPROC |
1416 | END(apf_page_fault) | 1417 | END(async_page_fault) |
1417 | #endif | 1418 | #endif |
1418 | 1419 | ||
1419 | /* | 1420 | /* |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index aed1ffbeb0c9..8a445a0c989e 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -18,7 +18,7 @@ | |||
18 | * A note on terminology: | 18 | * A note on terminology: |
19 | * - top of stack: Architecture defined interrupt frame from SS to RIP | 19 | * - top of stack: Architecture defined interrupt frame from SS to RIP |
20 | * at the top of the kernel process stack. | 20 | * at the top of the kernel process stack. |
21 | * - partial stack frame: partially saved registers upto R11. | 21 | * - partial stack frame: partially saved registers up to R11. |
22 | * - full stack frame: Like partial stack frame, but all register saved. | 22 | * - full stack frame: Like partial stack frame, but all register saved. |
23 | * | 23 | * |
24 | * Some macro usage: | 24 | * Some macro usage: |
@@ -61,6 +61,8 @@ | |||
61 | #define __AUDIT_ARCH_LE 0x40000000 | 61 | #define __AUDIT_ARCH_LE 0x40000000 |
62 | 62 | ||
63 | .code64 | 63 | .code64 |
64 | .section .entry.text, "ax" | ||
65 | |||
64 | #ifdef CONFIG_FUNCTION_TRACER | 66 | #ifdef CONFIG_FUNCTION_TRACER |
65 | #ifdef CONFIG_DYNAMIC_FTRACE | 67 | #ifdef CONFIG_DYNAMIC_FTRACE |
66 | ENTRY(mcount) | 68 | ENTRY(mcount) |
@@ -420,7 +422,7 @@ ENTRY(ret_from_fork) | |||
420 | END(ret_from_fork) | 422 | END(ret_from_fork) |
421 | 423 | ||
422 | /* | 424 | /* |
423 | * System call entry. Upto 6 arguments in registers are supported. | 425 | * System call entry. Up to 6 arguments in registers are supported. |
424 | * | 426 | * |
425 | * SYSCALL does not save anything on the stack and does not change the | 427 | * SYSCALL does not save anything on the stack and does not change the |
426 | * stack pointer. | 428 | * stack pointer. |
@@ -744,7 +746,7 @@ END(stub_rt_sigreturn) | |||
744 | */ | 746 | */ |
745 | .section .init.rodata,"a" | 747 | .section .init.rodata,"a" |
746 | ENTRY(interrupt) | 748 | ENTRY(interrupt) |
747 | .text | 749 | .section .entry.text |
748 | .p2align 5 | 750 | .p2align 5 |
749 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 751 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
750 | ENTRY(irq_entries_start) | 752 | ENTRY(irq_entries_start) |
@@ -763,7 +765,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
763 | .endif | 765 | .endif |
764 | .previous | 766 | .previous |
765 | .quad 1b | 767 | .quad 1b |
766 | .text | 768 | .section .entry.text |
767 | vector=vector+1 | 769 | vector=vector+1 |
768 | .endif | 770 | .endif |
769 | .endr | 771 | .endr |
@@ -975,9 +977,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ | |||
975 | x86_platform_ipi smp_x86_platform_ipi | 977 | x86_platform_ipi smp_x86_platform_ipi |
976 | 978 | ||
977 | #ifdef CONFIG_SMP | 979 | #ifdef CONFIG_SMP |
978 | .irpc idx, "01234567" | 980 | .irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ |
981 | 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 | ||
982 | .if NUM_INVALIDATE_TLB_VECTORS > \idx | ||
979 | apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ | 983 | apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ |
980 | invalidate_interrupt\idx smp_invalidate_interrupt | 984 | invalidate_interrupt\idx smp_invalidate_interrupt |
985 | .endif | ||
981 | .endr | 986 | .endr |
982 | #endif | 987 | #endif |
983 | 988 | ||
@@ -1248,7 +1253,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) | |||
1248 | decl PER_CPU_VAR(irq_count) | 1253 | decl PER_CPU_VAR(irq_count) |
1249 | jmp error_exit | 1254 | jmp error_exit |
1250 | CFI_ENDPROC | 1255 | CFI_ENDPROC |
1251 | END(do_hypervisor_callback) | 1256 | END(xen_do_hypervisor_callback) |
1252 | 1257 | ||
1253 | /* | 1258 | /* |
1254 | * Hypervisor uses this for application faults while it executes. | 1259 | * Hypervisor uses this for application faults while it executes. |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 382eb2936d4d..a93742a57468 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -437,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
437 | return; | 437 | return; |
438 | } | 438 | } |
439 | 439 | ||
440 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
441 | frame_pointer) == -EBUSY) { | ||
442 | *parent = old; | ||
443 | return; | ||
444 | } | ||
445 | |||
446 | trace.func = self_addr; | 440 | trace.func = self_addr; |
441 | trace.depth = current->curr_ret_stack + 1; | ||
447 | 442 | ||
448 | /* Only trace if the calling function expects to */ | 443 | /* Only trace if the calling function expects to */ |
449 | if (!ftrace_graph_entry(&trace)) { | 444 | if (!ftrace_graph_entry(&trace)) { |
450 | current->curr_ret_stack--; | ||
451 | *parent = old; | 445 | *parent = old; |
446 | return; | ||
447 | } | ||
448 | |||
449 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
450 | frame_pointer) == -EBUSY) { | ||
451 | *parent = old; | ||
452 | return; | ||
452 | } | 453 | } |
453 | } | 454 | } |
454 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 455 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
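prepare_ftrace_return() now consults the graph-entry filter before pushing onto the return stack, so a rejected entry no longer needs the curr_ret_stack-- rollback. A toy model of the new ordering follows; none of the kernel structures are used, everything here is a stand-in.

#include <stdio.h>

static unsigned long ret_stack[32];
static int curr_ret_stack = -1;

static int entry_filter_accepts(unsigned long func)
{
        return func != 0xdeadbeef;      /* pretend one function is filtered out */
}

static int push_return_trace(unsigned long ret)
{
        if (curr_ret_stack >= 31)
                return -1;              /* -EBUSY in the kernel */
        ret_stack[++curr_ret_stack] = ret;
        return 0;
}

static void prepare_return(unsigned long *parent, unsigned long func)
{
        unsigned long old = *parent;

        /* Ask the filter first: nothing to undo if it says no. */
        if (!entry_filter_accepts(func))
                return;

        if (push_return_trace(old) < 0)
                return;

        *parent = 0x1000;               /* stand-in for return_to_handler */
}

int main(void)
{
        unsigned long parent = 0x4000;

        prepare_return(&parent, 0xdeadbeef);    /* filtered: parent untouched */
        prepare_return(&parent, 0xc0ffee);      /* traced: parent redirected  */
        printf("parent=%#lx depth=%d\n", parent, curr_ret_stack);
        return 0;
}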
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 7f138b3c3c52..d6d6bb361931 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -34,15 +34,6 @@ void __init i386_start_kernel(void) | |||
34 | { | 34 | { |
35 | memblock_init(); | 35 | memblock_init(); |
36 | 36 | ||
37 | #ifdef CONFIG_X86_TRAMPOLINE | ||
38 | /* | ||
39 | * But first pinch a few for the stack/trampoline stuff | ||
40 | * FIXME: Don't need the extra page at 4K, but need to fix | ||
41 | * trampoline before removing it. (see the GDT stuff) | ||
42 | */ | ||
43 | memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); | ||
44 | #endif | ||
45 | |||
46 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 37 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); |
47 | 38 | ||
48 | #ifdef CONFIG_BLK_DEV_INITRD | 39 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 2d2673c28aff..5655c2272adb 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
77 | /* Make NULL pointers segfault */ | 77 | /* Make NULL pointers segfault */ |
78 | zap_identity_mappings(); | 78 | zap_identity_mappings(); |
79 | 79 | ||
80 | /* Cleanup the over mapped high alias */ | ||
81 | cleanup_highmap(); | ||
82 | |||
83 | max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; | 80 | max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; |
84 | 81 | ||
85 | for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { | 82 | for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 767d6c43de37..ce0be7cd085e 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT | |||
73 | */ | 73 | */ |
74 | KERNEL_PAGES = LOWMEM_PAGES | 74 | KERNEL_PAGES = LOWMEM_PAGES |
75 | 75 | ||
76 | INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm | 76 | INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE |
77 | RESERVE_BRK(pagetables, INIT_MAP_SIZE) | 77 | RESERVE_BRK(pagetables, INIT_MAP_SIZE) |
78 | 78 | ||
79 | /* | 79 | /* |
@@ -137,7 +137,7 @@ ENTRY(startup_32) | |||
137 | movsl | 137 | movsl |
138 | 1: | 138 | 1: |
139 | 139 | ||
140 | #ifdef CONFIG_OLPC_OPENFIRMWARE | 140 | #ifdef CONFIG_OLPC |
141 | /* save OFW's pgdir table for later use when calling into OFW */ | 141 | /* save OFW's pgdir table for later use when calling into OFW */ |
142 | movl %cr3, %eax | 142 | movl %cr3, %eax |
143 | movl %eax, pa(olpc_ofw_pgd) | 143 | movl %eax, pa(olpc_ofw_pgd) |
@@ -623,7 +623,7 @@ ENTRY(initial_code) | |||
623 | * BSS section | 623 | * BSS section |
624 | */ | 624 | */ |
625 | __PAGE_ALIGNED_BSS | 625 | __PAGE_ALIGNED_BSS |
626 | .align PAGE_SIZE_asm | 626 | .align PAGE_SIZE |
627 | #ifdef CONFIG_X86_PAE | 627 | #ifdef CONFIG_X86_PAE |
628 | initial_pg_pmd: | 628 | initial_pg_pmd: |
629 | .fill 1024*KPMDS,4,0 | 629 | .fill 1024*KPMDS,4,0 |
@@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir) | |||
644 | #ifdef CONFIG_X86_PAE | 644 | #ifdef CONFIG_X86_PAE |
645 | __PAGE_ALIGNED_DATA | 645 | __PAGE_ALIGNED_DATA |
646 | /* Page-aligned for the benefit of paravirt? */ | 646 | /* Page-aligned for the benefit of paravirt? */ |
647 | .align PAGE_SIZE_asm | 647 | .align PAGE_SIZE |
648 | ENTRY(initial_page_table) | 648 | ENTRY(initial_page_table) |
649 | .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ | 649 | .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ |
650 | # if KPMDS == 3 | 650 | # if KPMDS == 3 |
@@ -662,7 +662,7 @@ ENTRY(initial_page_table) | |||
662 | # else | 662 | # else |
663 | # error "Kernel PMDs should be 1, 2 or 3" | 663 | # error "Kernel PMDs should be 1, 2 or 3" |
664 | # endif | 664 | # endif |
665 | .align PAGE_SIZE_asm /* needs to be page-sized too */ | 665 | .align PAGE_SIZE /* needs to be page-sized too */ |
666 | #endif | 666 | #endif |
667 | 667 | ||
668 | .data | 668 | .data |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 239046bd447f..e11e39478a49 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -136,10 +136,9 @@ ident_complete: | |||
136 | /* Fixup phys_base */ | 136 | /* Fixup phys_base */ |
137 | addq %rbp, phys_base(%rip) | 137 | addq %rbp, phys_base(%rip) |
138 | 138 | ||
139 | #ifdef CONFIG_X86_TRAMPOLINE | 139 | /* Fixup trampoline */ |
140 | addq %rbp, trampoline_level4_pgt + 0(%rip) | 140 | addq %rbp, trampoline_level4_pgt + 0(%rip) |
141 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) | 141 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) |
142 | #endif | ||
143 | 142 | ||
144 | /* Due to ENTRY(), sometimes the empty space gets filled with | 143 | /* Due to ENTRY(), sometimes the empty space gets filled with |
145 | * zeros. Better take a jmp than relying on empty space being | 144 | * zeros. Better take a jmp than relying on empty space being |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 4ff5968f12d2..bfe8f729e086 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
503 | if (!irq) | 503 | if (!irq) |
504 | return -EINVAL; | 504 | return -EINVAL; |
505 | 505 | ||
506 | set_irq_data(irq, dev); | 506 | irq_set_handler_data(irq, dev); |
507 | 507 | ||
508 | if (hpet_setup_msi_irq(irq)) | 508 | if (hpet_setup_msi_irq(irq)) |
509 | return -EINVAL; | 509 | return -EINVAL; |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index e60c38cc0eed..12aff2537682 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(fpu_finit); | |||
145 | * The _current_ task is using the FPU for the first time | 145 | * The _current_ task is using the FPU for the first time |
146 | * so initialize it and set the mxcsr to its default | 146 | * so initialize it and set the mxcsr to its default |
147 | * value at reset if we support XMM instructions and then | 147 | * value at reset if we support XMM instructions and then |
148 | * remeber the current task has used the FPU. | 148 | * remember the current task has used the FPU. |
149 | */ | 149 | */ |
150 | int init_fpu(struct task_struct *tsk) | 150 | int init_fpu(struct task_struct *tsk) |
151 | { | 151 | { |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 20757cb2efa3..d9ca749c123b 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq) | |||
112 | { | 112 | { |
113 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
114 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
116 | i8259A_chip.name); | 116 | i8259A_chip.name); |
117 | enable_irq(irq); | 117 | enable_irq(irq); |
118 | } | 118 | } |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 8eec0ec59af2..8c968974253d 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -14,22 +14,9 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <linux/bitmap.h> | ||
17 | #include <asm/syscalls.h> | 18 | #include <asm/syscalls.h> |
18 | 19 | ||
19 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ | ||
20 | static void set_bitmap(unsigned long *bitmap, unsigned int base, | ||
21 | unsigned int extent, int new_value) | ||
22 | { | ||
23 | unsigned int i; | ||
24 | |||
25 | for (i = base; i < base + extent; i++) { | ||
26 | if (new_value) | ||
27 | __set_bit(i, bitmap); | ||
28 | else | ||
29 | __clear_bit(i, bitmap); | ||
30 | } | ||
31 | } | ||
32 | |||
33 | /* | 20 | /* |
34 | * this changes the io permissions bitmap in the current task. | 21 | * this changes the io permissions bitmap in the current task. |
35 | */ | 22 | */ |
@@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) | |||
69 | */ | 56 | */ |
70 | tss = &per_cpu(init_tss, get_cpu()); | 57 | tss = &per_cpu(init_tss, get_cpu()); |
71 | 58 | ||
72 | set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); | 59 | if (turn_on) |
60 | bitmap_clear(t->io_bitmap_ptr, from, num); | ||
61 | else | ||
62 | bitmap_set(t->io_bitmap_ptr, from, num); | ||
73 | 63 | ||
74 | /* | 64 | /* |
75 | * Search for a (possibly new) maximum. This is simple and stupid, | 65 | * Search for a (possibly new) maximum. This is simple and stupid, |
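sys_ioperm() above drops its open-coded loop in favour of bitmap_set()/bitmap_clear(). In the TSS I/O permission bitmap a set bit means "trap", so granting access (turn_on) clears bits, which is why the call site passes !turn_on. A self-contained sketch of those semantics, with a plain array standing in for the kernel's bitmap helpers:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Simplified bitmap_set()/bitmap_clear() over a run of bits. */
static void bitmap_assign(unsigned long *map, unsigned int start,
                          unsigned int len, int value)
{
        unsigned int i;

        for (i = start; i < start + len; i++) {
                if (value)
                        map[i / BITS_PER_LONG] |=  (1UL << (i % BITS_PER_LONG));
                else
                        map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
        }
}

int main(void)
{
        /* 65536 I/O ports; a 1 bit means "trap", so start fully denied. */
        unsigned long io_bitmap[65536 / BITS_PER_LONG];
        int turn_on = 1;

        bitmap_assign(io_bitmap, 0, 65536, 1);

        /* ioperm(0x3f8, 8, 1): grant the 16550 UART range -> clear the bits. */
        bitmap_assign(io_bitmap, 0x3f8, 8, !turn_on);

        printf("port 0x3f8 %s\n",
               (io_bitmap[0x3f8 / BITS_PER_LONG] >> (0x3f8 % BITS_PER_LONG)) & 1
               ? "denied" : "allowed");
        return 0;
}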
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 387b6a0c9e81..948a31eae75f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq) | |||
44 | 44 | ||
45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) | 45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) |
46 | /* | 46 | /* |
47 | * /proc/interrupts printing: | 47 | * /proc/interrupts printing for arch specific interrupts |
48 | */ | 48 | */ |
49 | static int show_other_interrupts(struct seq_file *p, int prec) | 49 | int arch_show_interrupts(struct seq_file *p, int prec) |
50 | { | 50 | { |
51 | int j; | 51 | int j; |
52 | 52 | ||
@@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
124 | 124 | ||
125 | int show_interrupts(struct seq_file *p, void *v) | ||
126 | { | ||
127 | unsigned long flags, any_count = 0; | ||
128 | int i = *(loff_t *) v, j, prec; | ||
129 | struct irqaction *action; | ||
130 | struct irq_desc *desc; | ||
131 | |||
132 | if (i > nr_irqs) | ||
133 | return 0; | ||
134 | |||
135 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
136 | j *= 10; | ||
137 | |||
138 | if (i == nr_irqs) | ||
139 | return show_other_interrupts(p, prec); | ||
140 | |||
141 | /* print header */ | ||
142 | if (i == 0) { | ||
143 | seq_printf(p, "%*s", prec + 8, ""); | ||
144 | for_each_online_cpu(j) | ||
145 | seq_printf(p, "CPU%-8d", j); | ||
146 | seq_putc(p, '\n'); | ||
147 | } | ||
148 | |||
149 | desc = irq_to_desc(i); | ||
150 | if (!desc) | ||
151 | return 0; | ||
152 | |||
153 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
154 | for_each_online_cpu(j) | ||
155 | any_count |= kstat_irqs_cpu(i, j); | ||
156 | action = desc->action; | ||
157 | if (!action && !any_count) | ||
158 | goto out; | ||
159 | |||
160 | seq_printf(p, "%*d: ", prec, i); | ||
161 | for_each_online_cpu(j) | ||
162 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
163 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
164 | seq_printf(p, "-%-8s", desc->name); | ||
165 | |||
166 | if (action) { | ||
167 | seq_printf(p, " %s", action->name); | ||
168 | while ((action = action->next) != NULL) | ||
169 | seq_printf(p, ", %s", action->name); | ||
170 | } | ||
171 | |||
172 | seq_putc(p, '\n'); | ||
173 | out: | ||
174 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* | 125 | /* |
179 | * /proc/stat helpers | 126 | * /proc/stat helpers |
180 | */ | 127 | */ |
@@ -276,15 +223,6 @@ void smp_x86_platform_ipi(struct pt_regs *regs) | |||
276 | 223 | ||
277 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 224 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
278 | 225 | ||
279 | #ifdef CONFIG_OF | ||
280 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
281 | const u32 *intspec, unsigned int intsize) | ||
282 | { | ||
283 | return intspec[0]; | ||
284 | } | ||
285 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
286 | #endif | ||
287 | |||
288 | #ifdef CONFIG_HOTPLUG_CPU | 226 | #ifdef CONFIG_HOTPLUG_CPU |
289 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | 227 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
290 | void fixup_irqs(void) | 228 | void fixup_irqs(void) |
@@ -293,6 +231,7 @@ void fixup_irqs(void) | |||
293 | static int warned; | 231 | static int warned; |
294 | struct irq_desc *desc; | 232 | struct irq_desc *desc; |
295 | struct irq_data *data; | 233 | struct irq_data *data; |
234 | struct irq_chip *chip; | ||
296 | 235 | ||
297 | for_each_irq_desc(irq, desc) { | 236 | for_each_irq_desc(irq, desc) { |
298 | int break_affinity = 0; | 237 | int break_affinity = 0; |
@@ -307,10 +246,10 @@ void fixup_irqs(void) | |||
307 | /* interrupt's are disabled at this point */ | 246 | /* interrupt's are disabled at this point */ |
308 | raw_spin_lock(&desc->lock); | 247 | raw_spin_lock(&desc->lock); |
309 | 248 | ||
310 | data = &desc->irq_data; | 249 | data = irq_desc_get_irq_data(desc); |
311 | affinity = data->affinity; | 250 | affinity = data->affinity; |
312 | if (!irq_has_action(irq) || | 251 | if (!irq_has_action(irq) || |
313 | cpumask_equal(affinity, cpu_online_mask)) { | 252 | cpumask_subset(affinity, cpu_online_mask)) { |
314 | raw_spin_unlock(&desc->lock); | 253 | raw_spin_unlock(&desc->lock); |
315 | continue; | 254 | continue; |
316 | } | 255 | } |
@@ -327,16 +266,17 @@ void fixup_irqs(void) | |||
327 | affinity = cpu_all_mask; | 266 | affinity = cpu_all_mask; |
328 | } | 267 | } |
329 | 268 | ||
330 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) | 269 | chip = irq_data_get_irq_chip(data); |
331 | data->chip->irq_mask(data); | 270 | if (!irqd_can_move_in_process_context(data) && chip->irq_mask) |
271 | chip->irq_mask(data); | ||
332 | 272 | ||
333 | if (data->chip->irq_set_affinity) | 273 | if (chip->irq_set_affinity) |
334 | data->chip->irq_set_affinity(data, affinity, true); | 274 | chip->irq_set_affinity(data, affinity, true); |
335 | else if (!(warned++)) | 275 | else if (!(warned++)) |
336 | set_affinity = 0; | 276 | set_affinity = 0; |
337 | 277 | ||
338 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) | 278 | if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) |
339 | data->chip->irq_unmask(data); | 279 | chip->irq_unmask(data); |
340 | 280 | ||
341 | raw_spin_unlock(&desc->lock); | 281 | raw_spin_unlock(&desc->lock); |
342 | 282 | ||
@@ -368,10 +308,11 @@ void fixup_irqs(void) | |||
368 | irq = __this_cpu_read(vector_irq[vector]); | 308 | irq = __this_cpu_read(vector_irq[vector]); |
369 | 309 | ||
370 | desc = irq_to_desc(irq); | 310 | desc = irq_to_desc(irq); |
371 | data = &desc->irq_data; | 311 | data = irq_desc_get_irq_data(desc); |
312 | chip = irq_data_get_irq_chip(data); | ||
372 | raw_spin_lock(&desc->lock); | 313 | raw_spin_lock(&desc->lock); |
373 | if (data->chip->irq_retrigger) | 314 | if (chip->irq_retrigger) |
374 | data->chip->irq_retrigger(data); | 315 | chip->irq_retrigger(data); |
375 | raw_spin_unlock(&desc->lock); | 316 | raw_spin_unlock(&desc->lock); |
376 | } | 317 | } |
377 | } | 318 | } |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 9974d21048fd..72090705a656 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -172,7 +172,7 @@ asmlinkage void do_softirq(void) | |||
172 | 172 | ||
173 | call_on_stack(__do_softirq, isp); | 173 | call_on_stack(__do_softirq, isp); |
174 | /* | 174 | /* |
175 | * Shouldnt happen, we returned above if in_interrupt(): | 175 | * Shouldn't happen, we returned above if in_interrupt(): |
176 | */ | 176 | */ |
177 | WARN_ON_ONCE(softirq_count()); | 177 | WARN_ON_ONCE(softirq_count()); |
178 | } | 178 | } |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index c752e973958d..f470e4ef993e 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
26 | #include <asm/i8259.h> | 26 | #include <asm/i8259.h> |
27 | #include <asm/traps.h> | 27 | #include <asm/traps.h> |
28 | #include <asm/prom.h> | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: | 31 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: |
@@ -71,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
71 | static struct irqaction fpu_irq = { | 72 | static struct irqaction fpu_irq = { |
72 | .handler = math_error_irq, | 73 | .handler = math_error_irq, |
73 | .name = "fpu", | 74 | .name = "fpu", |
75 | .flags = IRQF_NO_THREAD, | ||
74 | }; | 76 | }; |
75 | #endif | 77 | #endif |
76 | 78 | ||
@@ -80,6 +82,7 @@ static struct irqaction fpu_irq = { | |||
80 | static struct irqaction irq2 = { | 82 | static struct irqaction irq2 = { |
81 | .handler = no_action, | 83 | .handler = no_action, |
82 | .name = "cascade", | 84 | .name = "cascade", |
85 | .flags = IRQF_NO_THREAD, | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 88 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
@@ -110,7 +113,7 @@ void __init init_ISA_irqs(void) | |||
110 | legacy_pic->init(0); | 113 | legacy_pic->init(0); |
111 | 114 | ||
112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) | 115 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); | 116 | irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); |
114 | } | 117 | } |
115 | 118 | ||
116 | void __init init_IRQ(void) | 119 | void __init init_IRQ(void) |
@@ -118,6 +121,12 @@ void __init init_IRQ(void) | |||
118 | int i; | 121 | int i; |
119 | 122 | ||
120 | /* | 123 | /* |
124 | * We probably need a better place for this, but it works for | ||
125 | * now ... | ||
126 | */ | ||
127 | x86_add_irq_domains(); | ||
128 | |||
129 | /* | ||
121 | * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15. | 130 | * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15. |
122 | * If these IRQ's are handled by legacy interrupt-controllers like PIC, | 131 | * If these IRQ's are handled by legacy interrupt-controllers like PIC, |
123 | * then this configuration will likely be static after the boot. If | 132 | * then this configuration will likely be static after the boot. If |
@@ -164,14 +173,77 @@ static void __init smp_intr_init(void) | |||
164 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | 173 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); |
165 | 174 | ||
166 | /* IPIs for invalidation */ | 175 | /* IPIs for invalidation */ |
167 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); | 176 | #define ALLOC_INVTLB_VEC(NR) \ |
168 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); | 177 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \ |
169 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); | 178 | invalidate_interrupt##NR) |
170 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); | 179 | |
171 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); | 180 | switch (NUM_INVALIDATE_TLB_VECTORS) { |
172 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); | 181 | default: |
173 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); | 182 | ALLOC_INVTLB_VEC(31); |
174 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); | 183 | case 31: |
184 | ALLOC_INVTLB_VEC(30); | ||
185 | case 30: | ||
186 | ALLOC_INVTLB_VEC(29); | ||
187 | case 29: | ||
188 | ALLOC_INVTLB_VEC(28); | ||
189 | case 28: | ||
190 | ALLOC_INVTLB_VEC(27); | ||
191 | case 27: | ||
192 | ALLOC_INVTLB_VEC(26); | ||
193 | case 26: | ||
194 | ALLOC_INVTLB_VEC(25); | ||
195 | case 25: | ||
196 | ALLOC_INVTLB_VEC(24); | ||
197 | case 24: | ||
198 | ALLOC_INVTLB_VEC(23); | ||
199 | case 23: | ||
200 | ALLOC_INVTLB_VEC(22); | ||
201 | case 22: | ||
202 | ALLOC_INVTLB_VEC(21); | ||
203 | case 21: | ||
204 | ALLOC_INVTLB_VEC(20); | ||
205 | case 20: | ||
206 | ALLOC_INVTLB_VEC(19); | ||
207 | case 19: | ||
208 | ALLOC_INVTLB_VEC(18); | ||
209 | case 18: | ||
210 | ALLOC_INVTLB_VEC(17); | ||
211 | case 17: | ||
212 | ALLOC_INVTLB_VEC(16); | ||
213 | case 16: | ||
214 | ALLOC_INVTLB_VEC(15); | ||
215 | case 15: | ||
216 | ALLOC_INVTLB_VEC(14); | ||
217 | case 14: | ||
218 | ALLOC_INVTLB_VEC(13); | ||
219 | case 13: | ||
220 | ALLOC_INVTLB_VEC(12); | ||
221 | case 12: | ||
222 | ALLOC_INVTLB_VEC(11); | ||
223 | case 11: | ||
224 | ALLOC_INVTLB_VEC(10); | ||
225 | case 10: | ||
226 | ALLOC_INVTLB_VEC(9); | ||
227 | case 9: | ||
228 | ALLOC_INVTLB_VEC(8); | ||
229 | case 8: | ||
230 | ALLOC_INVTLB_VEC(7); | ||
231 | case 7: | ||
232 | ALLOC_INVTLB_VEC(6); | ||
233 | case 6: | ||
234 | ALLOC_INVTLB_VEC(5); | ||
235 | case 5: | ||
236 | ALLOC_INVTLB_VEC(4); | ||
237 | case 4: | ||
238 | ALLOC_INVTLB_VEC(3); | ||
239 | case 3: | ||
240 | ALLOC_INVTLB_VEC(2); | ||
241 | case 2: | ||
242 | ALLOC_INVTLB_VEC(1); | ||
243 | case 1: | ||
244 | ALLOC_INVTLB_VEC(0); | ||
245 | break; | ||
246 | } | ||
175 | 247 | ||
176 | /* IPI for generic function call */ | 248 | /* IPI for generic function call */ |
177 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 249 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
@@ -243,7 +315,7 @@ void __init native_init_IRQ(void) | |||
243 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); | 315 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); |
244 | } | 316 | } |
245 | 317 | ||
246 | if (!acpi_ioapic) | 318 | if (!acpi_ioapic && !of_ioapic) |
247 | setup_irq(2, &irq2); | 319 | setup_irq(2, &irq2); |
248 | 320 | ||
249 | #ifdef CONFIG_X86_32 | 321 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index a4130005028a..dba0b36941a5 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -278,7 +278,7 @@ static int hw_break_release_slot(int breakno) | |||
278 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | 278 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); |
279 | if (dbg_release_bp_slot(*pevent)) | 279 | if (dbg_release_bp_slot(*pevent)) |
280 | /* | 280 | /* |
281 | * The debugger is responisble for handing the retry on | 281 | * The debugger is responsible for handing the retry on |
282 | * remove failure. | 282 | * remove failure. |
283 | */ | 283 | */ |
284 | return -1; | 284 | return -1; |
@@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
533 | } | 533 | } |
534 | return NOTIFY_DONE; | 534 | return NOTIFY_DONE; |
535 | 535 | ||
536 | case DIE_NMIWATCHDOG: | ||
537 | if (atomic_read(&kgdb_active) != -1) { | ||
538 | /* KGDB CPU roundup: */ | ||
539 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
540 | return NOTIFY_STOP; | ||
541 | } | ||
542 | /* Enter debugger: */ | ||
543 | break; | ||
544 | |||
545 | case DIE_DEBUG: | 536 | case DIE_DEBUG: |
546 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | 537 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
547 | if (user_mode(regs)) | 538 | if (user_mode(regs)) |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index d91c477b3f62..c969fd9d1566 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -1276,6 +1276,14 @@ static int __kprobes can_optimize(unsigned long paddr) | |||
1276 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | 1276 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) |
1277 | return 0; | 1277 | return 0; |
1278 | 1278 | ||
1279 | /* | ||
1280 | * Do not optimize in the entry code due to the unstable | ||
1281 | * stack handling. | ||
1282 | */ | ||
1283 | if ((paddr >= (unsigned long )__entry_text_start) && | ||
1284 | (paddr < (unsigned long )__entry_text_end)) | ||
1285 | return 0; | ||
1286 | |||
1279 | /* Check there is enough space for a relative jump. */ | 1287 | /* Check there is enough space for a relative jump. */ |
1280 | if (size - offset < RELATIVEJUMP_SIZE) | 1288 | if (size - offset < RELATIVEJUMP_SIZE) |
1281 | return 0; | 1289 | return 0; |
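The kprobes hunk above keeps the optimizer away from anything inside the linker-provided entry-text window, since optimized probes cannot cope with the entry code's stack handling. A hedged sketch of the same range test pulled out into a helper (the __entry_text_* symbols come from the hunk; the helper name is illustrative):

    /* Sketch only: the range check added to can_optimize(). */
    extern char __entry_text_start[], __entry_text_end[];

    static int in_entry_text(unsigned long paddr)
    {
            return paddr >= (unsigned long)__entry_text_start &&
                   paddr <  (unsigned long)__entry_text_end;
    }

    /* ... then, early in can_optimize():
     *
     *      if (in_entry_text(paddr))
     *              return 0;       // refuse to optimize entry-code probes
     */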
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8dc44662394b..33c07b0b122e 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -493,7 +493,7 @@ static void __init kvm_smp_prepare_boot_cpu(void) | |||
493 | native_smp_prepare_boot_cpu(); | 493 | native_smp_prepare_boot_cpu(); |
494 | } | 494 | } |
495 | 495 | ||
496 | static void kvm_guest_cpu_online(void *dummy) | 496 | static void __cpuinit kvm_guest_cpu_online(void *dummy) |
497 | { | 497 | { |
498 | kvm_guest_cpu_init(); | 498 | kvm_guest_cpu_init(); |
499 | } | 499 | } |
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 63eaf6596233..177183cbb6ae 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c | |||
@@ -259,7 +259,7 @@ static int __init mca_init(void) | |||
259 | /* | 259 | /* |
260 | * WARNING: Be careful when making changes here. Putting an adapter | 260 | * WARNING: Be careful when making changes here. Putting an adapter |
261 | * and the motherboard simultaneously into setup mode may result in | 261 | * and the motherboard simultaneously into setup mode may result in |
262 | * damage to chips (according to The Indispensible PC Hardware Book | 262 | * damage to chips (according to The Indispensable PC Hardware Book |
263 | * by Hans-Peter Messmer). Also, we disable system interrupts (so | 263 | * by Hans-Peter Messmer). Also, we disable system interrupts (so |
264 | * that we are not disturbed in the middle of this). | 264 | * that we are not disturbed in the middle of this). |
265 | */ | 265 | */ |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 0fe6d1a66c38..c5610384ab16 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -66,7 +66,6 @@ struct microcode_amd { | |||
66 | unsigned int mpb[0]; | 66 | unsigned int mpb[0]; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #define UCODE_MAX_SIZE 2048 | ||
70 | #define UCODE_CONTAINER_SECTION_HDR 8 | 69 | #define UCODE_CONTAINER_SECTION_HDR 8 |
71 | #define UCODE_CONTAINER_HEADER_SIZE 12 | 70 | #define UCODE_CONTAINER_HEADER_SIZE 12 |
72 | 71 | ||
@@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
77 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 76 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
78 | u32 dummy; | 77 | u32 dummy; |
79 | 78 | ||
80 | memset(csig, 0, sizeof(*csig)); | ||
81 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | 79 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { |
82 | pr_warning("microcode: CPU%d: AMD CPU family 0x%x not " | 80 | pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); |
83 | "supported\n", cpu, c->x86); | ||
84 | return -1; | 81 | return -1; |
85 | } | 82 | } |
83 | |||
86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 84 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 85 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); |
86 | |||
88 | return 0; | 87 | return 0; |
89 | } | 88 | } |
90 | 89 | ||
91 | static int get_matching_microcode(int cpu, void *mc, int rev) | 90 | static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr, |
91 | int rev) | ||
92 | { | 92 | { |
93 | struct microcode_header_amd *mc_header = mc; | ||
94 | unsigned int current_cpu_id; | 93 | unsigned int current_cpu_id; |
95 | u16 equiv_cpu_id = 0; | 94 | u16 equiv_cpu_id = 0; |
96 | unsigned int i = 0; | 95 | unsigned int i = 0; |
@@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev) | |||
109 | if (!equiv_cpu_id) | 108 | if (!equiv_cpu_id) |
110 | return 0; | 109 | return 0; |
111 | 110 | ||
112 | if (mc_header->processor_rev_id != equiv_cpu_id) | 111 | if (mc_hdr->processor_rev_id != equiv_cpu_id) |
113 | return 0; | 112 | return 0; |
114 | 113 | ||
115 | /* ucode might be chipset specific -- currently we don't support this */ | 114 | /* ucode might be chipset specific -- currently we don't support this */ |
116 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { | 115 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
117 | pr_err("CPU%d: loading of chipset specific code not yet supported\n", | 116 | pr_err("CPU%d: chipset specific code not yet supported\n", |
118 | cpu); | 117 | cpu); |
119 | return 0; | 118 | return 0; |
120 | } | 119 | } |
121 | 120 | ||
122 | if (mc_header->patch_id <= rev) | 121 | if (mc_hdr->patch_id <= rev) |
123 | return 0; | 122 | return 0; |
124 | 123 | ||
125 | return 1; | 124 | return 1; |
@@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu) | |||
144 | 143 | ||
145 | /* check current patch id and patch's id for match */ | 144 | /* check current patch id and patch's id for match */ |
146 | if (rev != mc_amd->hdr.patch_id) { | 145 | if (rev != mc_amd->hdr.patch_id) { |
147 | pr_err("CPU%d: update failed (for patch_level=0x%x)\n", | 146 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
148 | cpu, mc_amd->hdr.patch_id); | 147 | cpu, mc_amd->hdr.patch_id); |
149 | return -1; | 148 | return -1; |
150 | } | 149 | } |
151 | 150 | ||
152 | pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); | 151 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); |
153 | uci->cpu_sig.rev = rev; | 152 | uci->cpu_sig.rev = rev; |
154 | 153 | ||
155 | return 0; | 154 | return 0; |
156 | } | 155 | } |
157 | 156 | ||
158 | static void * | 157 | static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) |
159 | get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) | ||
160 | { | 158 | { |
161 | unsigned int total_size; | 159 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
162 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; | 160 | unsigned int max_size, actual_size; |
163 | void *mc; | 161 | |
162 | #define F1XH_MPB_MAX_SIZE 2048 | ||
163 | #define F14H_MPB_MAX_SIZE 1824 | ||
164 | #define F15H_MPB_MAX_SIZE 4096 | ||
165 | |||
166 | switch (c->x86) { | ||
167 | case 0x14: | ||
168 | max_size = F14H_MPB_MAX_SIZE; | ||
169 | break; | ||
170 | case 0x15: | ||
171 | max_size = F15H_MPB_MAX_SIZE; | ||
172 | break; | ||
173 | default: | ||
174 | max_size = F1XH_MPB_MAX_SIZE; | ||
175 | break; | ||
176 | } | ||
164 | 177 | ||
165 | get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR); | 178 | actual_size = buf[4] + (buf[5] << 8); |
166 | 179 | ||
167 | if (section_hdr[0] != UCODE_UCODE_TYPE) { | 180 | if (actual_size > size || actual_size > max_size) { |
168 | pr_err("error: invalid type field in container file section header\n"); | 181 | pr_err("section size mismatch\n"); |
169 | return NULL; | 182 | return 0; |
170 | } | 183 | } |
171 | 184 | ||
172 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); | 185 | return actual_size; |
186 | } | ||
173 | 187 | ||
174 | if (total_size > size || total_size > UCODE_MAX_SIZE) { | 188 | static struct microcode_header_amd * |
175 | pr_err("error: size mismatch\n"); | 189 | get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) |
176 | return NULL; | 190 | { |
191 | struct microcode_header_amd *mc = NULL; | ||
192 | unsigned int actual_size = 0; | ||
193 | |||
194 | if (buf[0] != UCODE_UCODE_TYPE) { | ||
195 | pr_err("invalid type field in container file section header\n"); | ||
196 | goto out; | ||
177 | } | 197 | } |
178 | 198 | ||
179 | mc = vzalloc(UCODE_MAX_SIZE); | 199 | actual_size = verify_ucode_size(cpu, buf, size); |
200 | if (!actual_size) | ||
201 | goto out; | ||
202 | |||
203 | mc = vzalloc(actual_size); | ||
180 | if (!mc) | 204 | if (!mc) |
181 | return NULL; | 205 | goto out; |
182 | 206 | ||
183 | get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size); | 207 | get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size); |
184 | *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR; | 208 | *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR; |
185 | 209 | ||
210 | out: | ||
186 | return mc; | 211 | return mc; |
187 | } | 212 | } |
188 | 213 | ||
189 | static int install_equiv_cpu_table(const u8 *buf) | 214 | static int install_equiv_cpu_table(const u8 *buf) |
190 | { | 215 | { |
191 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; | 216 | unsigned int *ibuf = (unsigned int *)buf; |
192 | unsigned int *buf_pos = (unsigned int *)container_hdr; | 217 | unsigned int type = ibuf[1]; |
193 | unsigned long size; | 218 | unsigned int size = ibuf[2]; |
194 | 219 | ||
195 | get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE); | 220 | if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
196 | 221 | pr_err("empty section/" | |
197 | size = buf_pos[2]; | 222 | "invalid type field in container file section header\n"); |
198 | 223 | return -EINVAL; | |
199 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { | ||
200 | pr_err("error: invalid type field in container file section header\n"); | ||
201 | return 0; | ||
202 | } | 224 | } |
203 | 225 | ||
204 | equiv_cpu_table = vmalloc(size); | 226 | equiv_cpu_table = vmalloc(size); |
205 | if (!equiv_cpu_table) { | 227 | if (!equiv_cpu_table) { |
206 | pr_err("failed to allocate equivalent CPU table\n"); | 228 | pr_err("failed to allocate equivalent CPU table\n"); |
207 | return 0; | 229 | return -ENOMEM; |
208 | } | 230 | } |
209 | 231 | ||
210 | buf += UCODE_CONTAINER_HEADER_SIZE; | 232 | get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size); |
211 | get_ucode_data(equiv_cpu_table, buf, size); | ||
212 | 233 | ||
213 | return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ | 234 | return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ |
214 | } | 235 | } |
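For context, the new verify_ucode_size() reads the section length from the little-endian 16-bit field at bytes 4-5 of the section header and bounds it by a per-family maximum rather than the old fixed UCODE_MAX_SIZE. A standalone sketch of that check, assuming the same header layout (the limits are copied from the hunk; the helper name and return convention are illustrative):

    /* Returns the patch size, or 0 if the section is truncated or oversized. */
    static unsigned int check_ucode_size(unsigned int family, const unsigned char *buf,
                                         unsigned int bytes_left)
    {
            unsigned int max_size, actual;

            switch (family) {
            case 0x14: max_size = 1824; break;      /* F14H_MPB_MAX_SIZE */
            case 0x15: max_size = 4096; break;      /* F15H_MPB_MAX_SIZE */
            default:   max_size = 2048; break;      /* F1XH_MPB_MAX_SIZE */
            }

            actual = buf[4] + (buf[5] << 8);        /* 16-bit little-endian length */
            if (actual > bytes_left || actual > max_size)
                    return 0;

            return actual;
    }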
@@ -223,16 +244,16 @@ static enum ucode_state | |||
223 | generic_load_microcode(int cpu, const u8 *data, size_t size) | 244 | generic_load_microcode(int cpu, const u8 *data, size_t size) |
224 | { | 245 | { |
225 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 246 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
247 | struct microcode_header_amd *mc_hdr = NULL; | ||
248 | unsigned int mc_size, leftover; | ||
249 | int offset; | ||
226 | const u8 *ucode_ptr = data; | 250 | const u8 *ucode_ptr = data; |
227 | void *new_mc = NULL; | 251 | void *new_mc = NULL; |
228 | void *mc; | 252 | unsigned int new_rev = uci->cpu_sig.rev; |
229 | int new_rev = uci->cpu_sig.rev; | ||
230 | unsigned int leftover; | ||
231 | unsigned long offset; | ||
232 | enum ucode_state state = UCODE_OK; | 253 | enum ucode_state state = UCODE_OK; |
233 | 254 | ||
234 | offset = install_equiv_cpu_table(ucode_ptr); | 255 | offset = install_equiv_cpu_table(ucode_ptr); |
235 | if (!offset) { | 256 | if (offset < 0) { |
236 | pr_err("failed to create equivalent cpu table\n"); | 257 | pr_err("failed to create equivalent cpu table\n"); |
237 | return UCODE_ERROR; | 258 | return UCODE_ERROR; |
238 | } | 259 | } |
@@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
241 | leftover = size - offset; | 262 | leftover = size - offset; |
242 | 263 | ||
243 | while (leftover) { | 264 | while (leftover) { |
244 | unsigned int uninitialized_var(mc_size); | 265 | mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); |
245 | struct microcode_header_amd *mc_header; | 266 | if (!mc_hdr) |
246 | |||
247 | mc = get_next_ucode(ucode_ptr, leftover, &mc_size); | ||
248 | if (!mc) | ||
249 | break; | 267 | break; |
250 | 268 | ||
251 | mc_header = (struct microcode_header_amd *)mc; | 269 | if (get_matching_microcode(cpu, mc_hdr, new_rev)) { |
252 | if (get_matching_microcode(cpu, mc, new_rev)) { | ||
253 | vfree(new_mc); | 270 | vfree(new_mc); |
254 | new_rev = mc_header->patch_id; | 271 | new_rev = mc_hdr->patch_id; |
255 | new_mc = mc; | 272 | new_mc = mc_hdr; |
256 | } else | 273 | } else |
257 | vfree(mc); | 274 | vfree(mc_hdr); |
258 | 275 | ||
259 | ucode_ptr += mc_size; | 276 | ucode_ptr += mc_size; |
260 | leftover -= mc_size; | 277 | leftover -= mc_size; |
261 | } | 278 | } |
262 | 279 | ||
263 | if (new_mc) { | 280 | if (!new_mc) { |
264 | if (!leftover) { | ||
265 | vfree(uci->mc); | ||
266 | uci->mc = new_mc; | ||
267 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", | ||
268 | cpu, new_rev, uci->cpu_sig.rev); | ||
269 | } else { | ||
270 | vfree(new_mc); | ||
271 | state = UCODE_ERROR; | ||
272 | } | ||
273 | } else | ||
274 | state = UCODE_NFOUND; | 281 | state = UCODE_NFOUND; |
282 | goto free_table; | ||
283 | } | ||
275 | 284 | ||
285 | if (!leftover) { | ||
286 | vfree(uci->mc); | ||
287 | uci->mc = new_mc; | ||
288 | pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n", | ||
289 | cpu, uci->cpu_sig.rev, new_rev); | ||
290 | } else { | ||
291 | vfree(new_mc); | ||
292 | state = UCODE_ERROR; | ||
293 | } | ||
294 | |||
295 | free_table: | ||
276 | free_equiv_cpu_table(); | 296 | free_equiv_cpu_table(); |
277 | 297 | ||
278 | return state; | 298 | return state; |
279 | } | 299 | } |
280 | 300 | ||
281 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) | 301 | static enum ucode_state request_microcode_amd(int cpu, struct device *device) |
282 | { | 302 | { |
283 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | 303 | const char *fw_name = "amd-ucode/microcode_amd.bin"; |
284 | const struct firmware *firmware; | 304 | const struct firmware *fw; |
285 | enum ucode_state ret; | 305 | enum ucode_state ret = UCODE_NFOUND; |
286 | 306 | ||
287 | if (request_firmware(&firmware, fw_name, device)) { | 307 | if (request_firmware(&fw, fw_name, device)) { |
288 | printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); | 308 | pr_err("failed to load file %s\n", fw_name); |
289 | return UCODE_NFOUND; | 309 | goto out; |
290 | } | 310 | } |
291 | 311 | ||
292 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 312 | ret = UCODE_ERROR; |
293 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", | 313 | if (*(u32 *)fw->data != UCODE_MAGIC) { |
294 | *(u32 *)firmware->data); | 314 | pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); |
295 | return UCODE_ERROR; | 315 | goto fw_release; |
296 | } | 316 | } |
297 | 317 | ||
298 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); | 318 | ret = generic_load_microcode(cpu, fw->data, fw->size); |
299 | 319 | ||
300 | release_firmware(firmware); | 320 | fw_release: |
321 | release_firmware(fw); | ||
301 | 322 | ||
323 | out: | ||
302 | return ret; | 324 | return ret; |
303 | } | 325 | } |
304 | 326 | ||
@@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu) | |||
319 | 341 | ||
320 | static struct microcode_ops microcode_amd_ops = { | 342 | static struct microcode_ops microcode_amd_ops = { |
321 | .request_microcode_user = request_microcode_user, | 343 | .request_microcode_user = request_microcode_user, |
322 | .request_microcode_fw = request_microcode_fw, | 344 | .request_microcode_fw = request_microcode_amd, |
323 | .collect_cpu_info = collect_cpu_info_amd, | 345 | .collect_cpu_info = collect_cpu_info_amd, |
324 | .apply_microcode = apply_microcode_amd, | 346 | .apply_microcode = apply_microcode_amd, |
325 | .microcode_fini_cpu = microcode_fini_cpu_amd, | 347 | .microcode_fini_cpu = microcode_fini_cpu_amd, |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 1cca374a2bac..87af68e0e1e1 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
417 | if (err) | 417 | if (err) |
418 | return err; | 418 | return err; |
419 | 419 | ||
420 | if (microcode_init_cpu(cpu) == UCODE_ERROR) | 420 | if (microcode_init_cpu(cpu) == UCODE_ERROR) { |
421 | err = -EINVAL; | 421 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
422 | return -EINVAL; | ||
423 | } | ||
422 | 424 | ||
423 | return err; | 425 | return err; |
424 | } | 426 | } |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 01b0f6d06451..6f789a887c06 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -883,7 +883,7 @@ static int __init update_mp_table(void) | |||
883 | 883 | ||
884 | if (!mpc_new_phys) { | 884 | if (!mpc_new_phys) { |
885 | unsigned char old, new; | 885 | unsigned char old, new; |
886 | /* check if we can change the postion */ | 886 | /* check if we can change the position */ |
887 | mpc->checksum = 0; | 887 | mpc->checksum = 0; |
888 | old = mpf_checksum((unsigned char *)mpc, mpc->length); | 888 | old = mpf_checksum((unsigned char *)mpc, mpc->length); |
889 | mpc->checksum = 0xff; | 889 | mpc->checksum = 0xff; |
@@ -892,7 +892,7 @@ static int __init update_mp_table(void) | |||
892 | printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); | 892 | printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); |
893 | return 0; | 893 | return 0; |
894 | } | 894 | } |
895 | printk(KERN_INFO "use in-positon replacing\n"); | 895 | printk(KERN_INFO "use in-position replacing\n"); |
896 | } else { | 896 | } else { |
897 | mpf->physptr = mpc_new_phys; | 897 | mpf->physptr = mpc_new_phys; |
898 | mpc_new = phys_to_virt(mpc_new_phys); | 898 | mpc_new = phys_to_virt(mpc_new_phys); |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index f56a117cef68..e8c33a302006 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -1279,7 +1279,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev) | |||
1279 | 1279 | ||
1280 | if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) { | 1280 | if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) { |
1281 | /* | 1281 | /* |
1282 | * FIXME: properly scan for devices accross the | 1282 | * FIXME: properly scan for devices across the |
1283 | * PCI-to-PCI bridge on every CalIOC2 port. | 1283 | * PCI-to-PCI bridge on every CalIOC2 port. |
1284 | */ | 1284 | */ |
1285 | return 1; | 1285 | return 1; |
@@ -1295,7 +1295,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev) | |||
1295 | 1295 | ||
1296 | /* | 1296 | /* |
1297 | * calgary_init_bitmap_from_tce_table(): | 1297 | * calgary_init_bitmap_from_tce_table(): |
1298 | * Funtion for kdump case. In the second/kdump kernel initialize | 1298 | * Function for kdump case. In the second/kdump kernel initialize |
1299 | * the bitmap based on the tce table entries obtained from first kernel | 1299 | * the bitmap based on the tce table entries obtained from first kernel |
1300 | */ | 1300 | */ |
1301 | static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) | 1301 | static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ff4554198981..d46cbe46b7ab 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -87,7 +87,7 @@ void exit_thread(void) | |||
87 | void show_regs(struct pt_regs *regs) | 87 | void show_regs(struct pt_regs *regs) |
88 | { | 88 | { |
89 | show_registers(regs); | 89 | show_registers(regs); |
90 | show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs)); | 90 | show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0); |
91 | } | 91 | } |
92 | 92 | ||
93 | void show_regs_common(void) | 93 | void show_regs_common(void) |
@@ -110,12 +110,9 @@ void show_regs_common(void) | |||
110 | init_utsname()->release, | 110 | init_utsname()->release, |
111 | (int)strcspn(init_utsname()->version, " "), | 111 | (int)strcspn(init_utsname()->version, " "), |
112 | init_utsname()->version); | 112 | init_utsname()->version); |
113 | printk(KERN_CONT " "); | 113 | printk(KERN_CONT " %s %s", vendor, product); |
114 | printk(KERN_CONT "%s %s", vendor, product); | 114 | if (board) |
115 | if (board) { | 115 | printk(KERN_CONT "/%s", board); |
116 | printk(KERN_CONT "/"); | ||
117 | printk(KERN_CONT "%s", board); | ||
118 | } | ||
119 | printk(KERN_CONT "\n"); | 116 | printk(KERN_CONT "\n"); |
120 | } | 117 | } |
121 | 118 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 715037caeb43..d3ce37edb54d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -303,68 +303,16 @@ static int __init reboot_init(void) | |||
303 | } | 303 | } |
304 | core_initcall(reboot_init); | 304 | core_initcall(reboot_init); |
305 | 305 | ||
306 | /* The following code and data reboots the machine by switching to real | 306 | extern const unsigned char machine_real_restart_asm[]; |
307 | mode and jumping to the BIOS reset entry point, as if the CPU has | 307 | extern const u64 machine_real_restart_gdt[3]; |
308 | really been reset. The previous version asked the keyboard | ||
309 | controller to pulse the CPU reset line, which is more thorough, but | ||
310 | doesn't work with at least one type of 486 motherboard. It is easy | ||
311 | to stop this code working; hence the copious comments. */ | ||
312 | static const unsigned long long | ||
313 | real_mode_gdt_entries [3] = | ||
314 | { | ||
315 | 0x0000000000000000ULL, /* Null descriptor */ | ||
316 | 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ | ||
317 | 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ | ||
318 | }; | ||
319 | 308 | ||
320 | static const struct desc_ptr | 309 | void machine_real_restart(unsigned int type) |
321 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, | ||
322 | real_mode_idt = { 0x3ff, 0 }; | ||
323 | |||
324 | /* This is 16-bit protected mode code to disable paging and the cache, | ||
325 | switch to real mode and jump to the BIOS reset code. | ||
326 | |||
327 | The instruction that switches to real mode by writing to CR0 must be | ||
328 | followed immediately by a far jump instruction, which set CS to a | ||
329 | valid value for real mode, and flushes the prefetch queue to avoid | ||
330 | running instructions that have already been decoded in protected | ||
331 | mode. | ||
332 | |||
333 | Clears all the flags except ET, especially PG (paging), PE | ||
334 | (protected-mode enable) and TS (task switch for coprocessor state | ||
335 | save). Flushes the TLB after paging has been disabled. Sets CD and | ||
336 | NW, to disable the cache on a 486, and invalidates the cache. This | ||
337 | is more like the state of a 486 after reset. I don't know if | ||
338 | something else should be done for other chips. | ||
339 | |||
340 | More could be done here to set up the registers as if a CPU reset had | ||
341 | occurred; hopefully real BIOSs don't assume much. */ | ||
342 | static const unsigned char real_mode_switch [] = | ||
343 | { | ||
344 | 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ | ||
345 | 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ | ||
346 | 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */ | ||
347 | 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */ | ||
348 | 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */ | ||
349 | 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ | ||
350 | 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ | ||
351 | 0x74, 0x02, /* jz f */ | ||
352 | 0x0f, 0x09, /* wbinvd */ | ||
353 | 0x24, 0x10, /* f: andb $0x10,al */ | ||
354 | 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ | ||
355 | }; | ||
356 | static const unsigned char jump_to_bios [] = | ||
357 | { | 310 | { |
358 | 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */ | 311 | void *restart_va; |
359 | }; | 312 | unsigned long restart_pa; |
313 | void (*restart_lowmem)(unsigned int); | ||
314 | u64 *lowmem_gdt; | ||
360 | 315 | ||
361 | /* | ||
362 | * Switch to real mode and then execute the code | ||
363 | * specified by the code and length parameters. | ||
364 | * We assume that length will aways be less that 100! | ||
365 | */ | ||
366 | void machine_real_restart(const unsigned char *code, int length) | ||
367 | { | ||
368 | local_irq_disable(); | 316 | local_irq_disable(); |
369 | 317 | ||
370 | /* Write zero to CMOS register number 0x0f, which the BIOS POST | 318 | /* Write zero to CMOS register number 0x0f, which the BIOS POST |
@@ -392,41 +340,23 @@ void machine_real_restart(const unsigned char *code, int length) | |||
392 | too. */ | 340 | too. */ |
393 | *((unsigned short *)0x472) = reboot_mode; | 341 | *((unsigned short *)0x472) = reboot_mode; |
394 | 342 | ||
395 | /* For the switch to real mode, copy some code to low memory. It has | 343 | /* Patch the GDT in the low memory trampoline */ |
396 | to be in the first 64k because it is running in 16-bit mode, and it | 344 | lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); |
397 | has to have the same physical and virtual address, because it turns | 345 | |
398 | off paging. Copy it near the end of the first page, out of the way | 346 | restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); |
399 | of BIOS variables. */ | 347 | restart_pa = virt_to_phys(restart_va); |
400 | memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100), | 348 | restart_lowmem = (void (*)(unsigned int))restart_pa; |
401 | real_mode_switch, sizeof (real_mode_switch)); | 349 | |
402 | memcpy((void *)(0x1000 - 100), code, length); | 350 | /* GDT[0]: GDT self-pointer */ |
403 | 351 | lowmem_gdt[0] = | |
404 | /* Set up the IDT for real mode. */ | 352 | (u64)(sizeof(machine_real_restart_gdt) - 1) + |
405 | load_idt(&real_mode_idt); | 353 | ((u64)virt_to_phys(lowmem_gdt) << 16); |
406 | 354 | /* GDT[1]: 64K real mode code segment */ | |
407 | /* Set up a GDT from which we can load segment descriptors for real | 355 | lowmem_gdt[1] = |
408 | mode. The GDT is not used in real mode; it is just needed here to | 356 | GDT_ENTRY(0x009b, restart_pa, 0xffff); |
409 | prepare the descriptors. */ | 357 | |
410 | load_gdt(&real_mode_gdt); | 358 | /* Jump to the identity-mapped low memory code */ |
411 | 359 | restart_lowmem(type); | |
412 | /* Load the data segment registers, and thus the descriptors ready for | ||
413 | real mode. The base address of each segment is 0x100, 16 times the | ||
414 | selector value being loaded here. This is so that the segment | ||
415 | registers don't have to be reloaded after switching to real mode: | ||
416 | the values are consistent for real mode operation already. */ | ||
417 | __asm__ __volatile__ ("movl $0x0010,%%eax\n" | ||
418 | "\tmovl %%eax,%%ds\n" | ||
419 | "\tmovl %%eax,%%es\n" | ||
420 | "\tmovl %%eax,%%fs\n" | ||
421 | "\tmovl %%eax,%%gs\n" | ||
422 | "\tmovl %%eax,%%ss" : : : "eax"); | ||
423 | |||
424 | /* Jump to the 16-bit code that we copied earlier. It disables paging | ||
425 | and the cache, switches to real mode, and jumps to the BIOS reset | ||
426 | entry point. */ | ||
427 | __asm__ __volatile__ ("ljmp $0x0008,%0" | ||
428 | : | ||
429 | : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100))); | ||
430 | } | 360 | } |
431 | #ifdef CONFIG_APM_MODULE | 361 | #ifdef CONFIG_APM_MODULE |
432 | EXPORT_SYMBOL(machine_real_restart); | 362 | EXPORT_SYMBOL(machine_real_restart); |
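The reworked machine_real_restart() only has to fill in two descriptors in the trampoline copy of machine_real_restart_gdt: entry 0 doubles as the GDTR image (16-bit limit in the low word, physical base starting at bit 16) and entry 1 is a 64K 16-bit code segment based at the relocated trampoline. A sketch of how those fields pack, assuming the standard x86 segment-descriptor layout (make_desc() is an illustrative expansion of what the GDT_ENTRY() helper computes, and the addresses in the usage note are hypothetical):

    static u64 make_desc(u32 flags, u32 base, u32 limit)
    {
            return ((u64)(base  & 0xff000000) << 32) |      /* base 31:24  -> bits 63:56 */
                   ((u64)(flags & 0x0000f0ff) << 40) |      /* access byte + high flags  */
                   ((u64)(limit & 0x000f0000) << 32) |      /* limit 19:16 -> bits 51:48 */
                   ((u64)(base  & 0x00ffffff) << 16) |      /* base 23:0   -> bits 39:16 */
                    (u64)(limit & 0x0000ffff);              /* limit 15:0  -> bits 15:0  */
    }

    /* Usage mirroring the hunk:
     *   gdt[0] = (sizeof(gdt) - 1) | ((u64)gdt_phys << 16);   -- GDTR image
     *   gdt[1] = make_desc(0x009b, restart_pa, 0xffff);       -- 16-bit 64K code segment
     */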
@@ -581,7 +511,7 @@ static void native_machine_emergency_restart(void) | |||
581 | 511 | ||
582 | #ifdef CONFIG_X86_32 | 512 | #ifdef CONFIG_X86_32 |
583 | case BOOT_BIOS: | 513 | case BOOT_BIOS: |
584 | machine_real_restart(jump_to_bios, sizeof(jump_to_bios)); | 514 | machine_real_restart(MRR_BIOS); |
585 | 515 | ||
586 | reboot_type = BOOT_KBD; | 516 | reboot_type = BOOT_KBD; |
587 | break; | 517 | break; |
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/kernel/reboot_32.S new file mode 100644 index 000000000000..29092b38d816 --- /dev/null +++ b/arch/x86/kernel/reboot_32.S | |||
@@ -0,0 +1,135 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <linux/init.h> | ||
3 | #include <asm/segment.h> | ||
4 | #include <asm/page_types.h> | ||
5 | |||
6 | /* | ||
7 | * The following code and data reboots the machine by switching to real | ||
8 | * mode and jumping to the BIOS reset entry point, as if the CPU has | ||
9 | * really been reset. The previous version asked the keyboard | ||
10 | * controller to pulse the CPU reset line, which is more thorough, but | ||
11 | * doesn't work with at least one type of 486 motherboard. It is easy | ||
12 | * to stop this code working; hence the copious comments. | ||
13 | * | ||
14 | * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax. | ||
15 | */ | ||
16 | .section ".x86_trampoline","a" | ||
17 | .balign 16 | ||
18 | .code32 | ||
19 | ENTRY(machine_real_restart_asm) | ||
20 | r_base = . | ||
21 | /* Get our own relocated address */ | ||
22 | call 1f | ||
23 | 1: popl %ebx | ||
24 | subl $1b, %ebx | ||
25 | |||
26 | /* Compute the equivalent real-mode segment */ | ||
27 | movl %ebx, %ecx | ||
28 | shrl $4, %ecx | ||
29 | |||
30 | /* Patch post-real-mode segment jump */ | ||
31 | movw dispatch_table(%ebx,%eax,2),%ax | ||
32 | movw %ax, 101f(%ebx) | ||
33 | movw %cx, 102f(%ebx) | ||
34 | |||
35 | /* Set up the IDT for real mode. */ | ||
36 | lidtl machine_real_restart_idt(%ebx) | ||
37 | |||
38 | /* | ||
39 | * Set up a GDT from which we can load segment descriptors for real | ||
40 | * mode. The GDT is not used in real mode; it is just needed here to | ||
41 | * prepare the descriptors. | ||
42 | */ | ||
43 | lgdtl machine_real_restart_gdt(%ebx) | ||
44 | |||
45 | /* | ||
46 | * Load the data segment registers with 16-bit compatible values | ||
47 | */ | ||
48 | movl $16, %ecx | ||
49 | movl %ecx, %ds | ||
50 | movl %ecx, %es | ||
51 | movl %ecx, %fs | ||
52 | movl %ecx, %gs | ||
53 | movl %ecx, %ss | ||
54 | ljmpl $8, $1f - r_base | ||
55 | |||
56 | /* | ||
57 | * This is 16-bit protected mode code to disable paging and the cache, | ||
58 | * switch to real mode and jump to the BIOS reset code. | ||
59 | * | ||
60 | * The instruction that switches to real mode by writing to CR0 must be | ||
61 | * followed immediately by a far jump instruction, which set CS to a | ||
62 | * valid value for real mode, and flushes the prefetch queue to avoid | ||
63 | * running instructions that have already been decoded in protected | ||
64 | * mode. | ||
65 | * | ||
66 | * Clears all the flags except ET, especially PG (paging), PE | ||
67 | * (protected-mode enable) and TS (task switch for coprocessor state | ||
68 | * save). Flushes the TLB after paging has been disabled. Sets CD and | ||
69 | * NW, to disable the cache on a 486, and invalidates the cache. This | ||
70 | * is more like the state of a 486 after reset. I don't know if | ||
71 | * something else should be done for other chips. | ||
72 | * | ||
73 | * More could be done here to set up the registers as if a CPU reset had | ||
74 | * occurred; hopefully real BIOSs don't assume much. This is not the | ||
75 | * actual BIOS entry point, anyway (that is at 0xfffffff0). | ||
76 | * | ||
77 | * Most of this work is probably excessive, but it is what is tested. | ||
78 | */ | ||
79 | .code16 | ||
80 | 1: | ||
81 | xorl %ecx, %ecx | ||
82 | movl %cr0, %eax | ||
83 | andl $0x00000011, %eax | ||
84 | orl $0x60000000, %eax | ||
85 | movl %eax, %cr0 | ||
86 | movl %ecx, %cr3 | ||
87 | movl %cr0, %edx | ||
88 | andl $0x60000000, %edx /* If no cache bits -> no wbinvd */ | ||
89 | jz 2f | ||
90 | wbinvd | ||
91 | 2: | ||
92 | andb $0x10, %al | ||
93 | movl %eax, %cr0 | ||
94 | .byte 0xea /* ljmpw */ | ||
95 | 101: .word 0 /* Offset */ | ||
96 | 102: .word 0 /* Segment */ | ||
97 | |||
98 | bios: | ||
99 | ljmpw $0xf000, $0xfff0 | ||
100 | |||
101 | apm: | ||
102 | movw $0x1000, %ax | ||
103 | movw %ax, %ss | ||
104 | movw $0xf000, %sp | ||
105 | movw $0x5307, %ax | ||
106 | movw $0x0001, %bx | ||
107 | movw $0x0003, %cx | ||
108 | int $0x15 | ||
109 | |||
110 | END(machine_real_restart_asm) | ||
111 | |||
112 | .balign 16 | ||
113 | /* These must match <asm/reboot.h */ | ||
114 | dispatch_table: | ||
115 | .word bios - r_base | ||
116 | .word apm - r_base | ||
117 | END(dispatch_table) | ||
118 | |||
119 | .balign 16 | ||
120 | machine_real_restart_idt: | ||
121 | .word 0xffff /* Length - real mode default value */ | ||
122 | .long 0 /* Base - real mode default value */ | ||
123 | END(machine_real_restart_idt) | ||
124 | |||
125 | .balign 16 | ||
126 | ENTRY(machine_real_restart_gdt) | ||
127 | .quad 0 /* Self-pointer, filled in by PM code */ | ||
128 | .quad 0 /* 16-bit code segment, filled in by PM code */ | ||
129 | /* | ||
130 | * 16-bit data segment with the selector value 16 = 0x10 and | ||
131 | * base value 0x100; since this is consistent with real mode | ||
132 | * semantics we don't have to reload the segments once CR0.PE = 0. | ||
133 | */ | ||
134 | .quad GDT_ENTRY(0x0093, 0x100, 0xffff) | ||
135 | END(machine_real_restart_gdt) | ||
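The dispatch_table at the end of the new trampoline file is indexed by the restart type passed in %eax (0 = BIOS, 1 = APM, per the header comment), and the selected 16-bit offset is patched into the ljmpw that follows the switch to real mode. A C analogue of that lookup, with the table contents treated as placeholders rather than real offsets:

    enum { MRR_BIOS = 0, MRR_APM = 1 };     /* matches the "(0 = BIOS, 1 = APM)" comment */

    /* Real-mode offsets of the 'bios' and 'apm' labels relative to r_base;
     * the numeric values here are purely illustrative. */
    static const unsigned short dispatch_table[] = {
            [MRR_BIOS] = 0x0060,
            [MRR_APM]  = 0x0066,
    };

    static unsigned short jump_target(unsigned int type)
    {
            return dispatch_table[type];    /* value written into the ljmpw offset */
    }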
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 6f39cab052d5..3f2ad2640d85 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/acpi.h> | 6 | #include <linux/acpi.h> |
7 | #include <linux/bcd.h> | 7 | #include <linux/bcd.h> |
8 | #include <linux/pnp.h> | 8 | #include <linux/pnp.h> |
9 | #include <linux/of.h> | ||
9 | 10 | ||
10 | #include <asm/vsyscall.h> | 11 | #include <asm/vsyscall.h> |
11 | #include <asm/x86_init.h> | 12 | #include <asm/x86_init.h> |
@@ -236,6 +237,8 @@ static __init int add_rtc_cmos(void) | |||
236 | } | 237 | } |
237 | } | 238 | } |
238 | #endif | 239 | #endif |
240 | if (of_have_populated_dt()) | ||
241 | return 0; | ||
239 | 242 | ||
240 | platform_device_register(&rtc_device); | 243 | platform_device_register(&rtc_device); |
241 | dev_info(&rtc_device.dev, | 244 | dev_info(&rtc_device.dev, |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3cfe26c0252..32bd87cbf982 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -113,6 +113,7 @@ | |||
113 | #endif | 113 | #endif |
114 | #include <asm/mce.h> | 114 | #include <asm/mce.h> |
115 | #include <asm/alternative.h> | 115 | #include <asm/alternative.h> |
116 | #include <asm/prom.h> | ||
116 | 117 | ||
117 | /* | 118 | /* |
118 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | 119 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
@@ -297,6 +298,9 @@ static void __init init_gbpages(void) | |||
297 | static inline void init_gbpages(void) | 298 | static inline void init_gbpages(void) |
298 | { | 299 | { |
299 | } | 300 | } |
301 | static void __init cleanup_highmap(void) | ||
302 | { | ||
303 | } | ||
300 | #endif | 304 | #endif |
301 | 305 | ||
302 | static void __init reserve_brk(void) | 306 | static void __init reserve_brk(void) |
@@ -429,16 +433,30 @@ static void __init parse_setup_data(void) | |||
429 | return; | 433 | return; |
430 | pa_data = boot_params.hdr.setup_data; | 434 | pa_data = boot_params.hdr.setup_data; |
431 | while (pa_data) { | 435 | while (pa_data) { |
432 | data = early_memremap(pa_data, PAGE_SIZE); | 436 | u32 data_len, map_len; |
437 | |||
438 | map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), | ||
439 | (u64)sizeof(struct setup_data)); | ||
440 | data = early_memremap(pa_data, map_len); | ||
441 | data_len = data->len + sizeof(struct setup_data); | ||
442 | if (data_len > map_len) { | ||
443 | early_iounmap(data, map_len); | ||
444 | data = early_memremap(pa_data, data_len); | ||
445 | map_len = data_len; | ||
446 | } | ||
447 | |||
433 | switch (data->type) { | 448 | switch (data->type) { |
434 | case SETUP_E820_EXT: | 449 | case SETUP_E820_EXT: |
435 | parse_e820_ext(data, pa_data); | 450 | parse_e820_ext(data); |
451 | break; | ||
452 | case SETUP_DTB: | ||
453 | add_dtb(pa_data); | ||
436 | break; | 454 | break; |
437 | default: | 455 | default: |
438 | break; | 456 | break; |
439 | } | 457 | } |
440 | pa_data = data->next; | 458 | pa_data = data->next; |
441 | early_iounmap(data, PAGE_SIZE); | 459 | early_iounmap(data, map_len); |
442 | } | 460 | } |
443 | } | 461 | } |
444 | 462 | ||
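The parse_setup_data() change above maps only as much of each record as fits up to the end of the current page (but never less than the fixed header), reads the real length from the header, and remaps with the full length when the record spills past that window. A sketch restating the map-peek-remap loop, where handle() is a hypothetical consumer standing in for the SETUP_E820_EXT/SETUP_DTB dispatch:

    while (pa_data) {
            struct setup_data *data;
            u32 map_len, data_len;

            /* First mapping: to the end of the page, but at least the
             * fixed header so data->len is readable. */
            map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
                          (u64)sizeof(struct setup_data));
            data = early_memremap(pa_data, map_len);

            data_len = data->len + sizeof(struct setup_data);
            if (data_len > map_len) {
                    /* Record crosses the window: remap with the full length. */
                    early_iounmap(data, map_len);
                    data = early_memremap(pa_data, data_len);
                    map_len = data_len;
            }

            handle(data);           /* e.g. parse_e820_ext() or add_dtb() */

            pa_data = data->next;
            early_iounmap(data, map_len);
    }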
@@ -680,15 +698,6 @@ static int __init parse_reservelow(char *p) | |||
680 | 698 | ||
681 | early_param("reservelow", parse_reservelow); | 699 | early_param("reservelow", parse_reservelow); |
682 | 700 | ||
683 | static u64 __init get_max_mapped(void) | ||
684 | { | ||
685 | u64 end = max_pfn_mapped; | ||
686 | |||
687 | end <<= PAGE_SHIFT; | ||
688 | |||
689 | return end; | ||
690 | } | ||
691 | |||
692 | /* | 701 | /* |
693 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 702 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
694 | * passed the efi memmap, systab, etc., so we should use these data structures | 703 | * passed the efi memmap, systab, etc., so we should use these data structures |
@@ -704,8 +713,6 @@ static u64 __init get_max_mapped(void) | |||
704 | 713 | ||
705 | void __init setup_arch(char **cmdline_p) | 714 | void __init setup_arch(char **cmdline_p) |
706 | { | 715 | { |
707 | int acpi = 0; | ||
708 | int amd = 0; | ||
709 | unsigned long flags; | 716 | unsigned long flags; |
710 | 717 | ||
711 | #ifdef CONFIG_X86_32 | 718 | #ifdef CONFIG_X86_32 |
@@ -922,6 +929,8 @@ void __init setup_arch(char **cmdline_p) | |||
922 | */ | 929 | */ |
923 | reserve_brk(); | 930 | reserve_brk(); |
924 | 931 | ||
932 | cleanup_highmap(); | ||
933 | |||
925 | memblock.current_limit = get_max_mapped(); | 934 | memblock.current_limit = get_max_mapped(); |
926 | memblock_x86_fill(); | 935 | memblock_x86_fill(); |
927 | 936 | ||
@@ -935,15 +944,8 @@ void __init setup_arch(char **cmdline_p) | |||
935 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", | 944 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", |
936 | max_pfn_mapped<<PAGE_SHIFT); | 945 | max_pfn_mapped<<PAGE_SHIFT); |
937 | 946 | ||
938 | reserve_trampoline_memory(); | 947 | setup_trampolines(); |
939 | 948 | ||
940 | #ifdef CONFIG_ACPI_SLEEP | ||
941 | /* | ||
942 | * Reserve low memory region for sleep support. | ||
943 | * even before init_memory_mapping | ||
944 | */ | ||
945 | acpi_reserve_wakeup_memory(); | ||
946 | #endif | ||
947 | init_gbpages(); | 949 | init_gbpages(); |
948 | 950 | ||
949 | /* max_pfn_mapped is updated here */ | 951 | /* max_pfn_mapped is updated here */ |
@@ -984,19 +986,7 @@ void __init setup_arch(char **cmdline_p) | |||
984 | 986 | ||
985 | early_acpi_boot_init(); | 987 | early_acpi_boot_init(); |
986 | 988 | ||
987 | #ifdef CONFIG_ACPI_NUMA | 989 | initmem_init(); |
988 | /* | ||
989 | * Parse SRAT to discover nodes. | ||
990 | */ | ||
991 | acpi = acpi_numa_init(); | ||
992 | #endif | ||
993 | |||
994 | #ifdef CONFIG_AMD_NUMA | ||
995 | if (!acpi) | ||
996 | amd = !amd_numa_init(0, max_pfn); | ||
997 | #endif | ||
998 | |||
999 | initmem_init(0, max_pfn, acpi, amd); | ||
1000 | memblock_find_dma_reserve(); | 990 | memblock_find_dma_reserve(); |
1001 | dma32_reserve_bootmem(); | 991 | dma32_reserve_bootmem(); |
1002 | 992 | ||
@@ -1029,8 +1019,8 @@ void __init setup_arch(char **cmdline_p) | |||
1029 | * Read APIC and some other early information from ACPI tables. | 1019 | * Read APIC and some other early information from ACPI tables. |
1030 | */ | 1020 | */ |
1031 | acpi_boot_init(); | 1021 | acpi_boot_init(); |
1032 | |||
1033 | sfi_init(); | 1022 | sfi_init(); |
1023 | x86_dtb_init(); | ||
1034 | 1024 | ||
1035 | /* | 1025 | /* |
1036 | * get boot-time SMP configuration: | 1026 | * get boot-time SMP configuration: |
@@ -1040,9 +1030,7 @@ void __init setup_arch(char **cmdline_p) | |||
1040 | 1030 | ||
1041 | prefill_possible_map(); | 1031 | prefill_possible_map(); |
1042 | 1032 | ||
1043 | #ifdef CONFIG_X86_64 | ||
1044 | init_cpu_to_node(); | 1033 | init_cpu_to_node(); |
1045 | #endif | ||
1046 | 1034 | ||
1047 | init_apic_mappings(); | 1035 | init_apic_mappings(); |
1048 | ioapic_and_gsi_init(); | 1036 | ioapic_and_gsi_init(); |
@@ -1066,6 +1054,8 @@ void __init setup_arch(char **cmdline_p) | |||
1066 | #endif | 1054 | #endif |
1067 | x86_init.oem.banner(); | 1055 | x86_init.oem.banner(); |
1068 | 1056 | ||
1057 | x86_init.timers.wallclock_init(); | ||
1058 | |||
1069 | mcheck_init(); | 1059 | mcheck_init(); |
1070 | 1060 | ||
1071 | local_irq_save(flags); | 1061 | local_irq_save(flags); |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 002b79685f73..71f4727da373 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void) | |||
225 | per_cpu(x86_bios_cpu_apicid, cpu) = | 225 | per_cpu(x86_bios_cpu_apicid, cpu) = |
226 | early_per_cpu_map(x86_bios_cpu_apicid, cpu); | 226 | early_per_cpu_map(x86_bios_cpu_apicid, cpu); |
227 | #endif | 227 | #endif |
228 | #ifdef CONFIG_X86_32 | ||
229 | per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
230 | early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); | ||
231 | #endif | ||
228 | #ifdef CONFIG_X86_64 | 232 | #ifdef CONFIG_X86_64 |
229 | per_cpu(irq_stack_ptr, cpu) = | 233 | per_cpu(irq_stack_ptr, cpu) = |
230 | per_cpu(irq_stack_union.irq_stack, cpu) + | 234 | per_cpu(irq_stack_union.irq_stack, cpu) + |
231 | IRQ_STACK_SIZE - 64; | 235 | IRQ_STACK_SIZE - 64; |
236 | #endif | ||
232 | #ifdef CONFIG_NUMA | 237 | #ifdef CONFIG_NUMA |
233 | per_cpu(x86_cpu_to_node_map, cpu) = | 238 | per_cpu(x86_cpu_to_node_map, cpu) = |
234 | early_per_cpu_map(x86_cpu_to_node_map, cpu); | 239 | early_per_cpu_map(x86_cpu_to_node_map, cpu); |
@@ -242,7 +247,6 @@ void __init setup_per_cpu_areas(void) | |||
242 | */ | 247 | */ |
243 | set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); | 248 | set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); |
244 | #endif | 249 | #endif |
245 | #endif | ||
246 | /* | 250 | /* |
247 | * Up to this point, the boot CPU has been using .init.data | 251 | * Up to this point, the boot CPU has been using .init.data |
248 | * area. Reload any changed state for the boot CPU. | 252 | * area. Reload any changed state for the boot CPU. |
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void) | |||
256 | early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; | 260 | early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; |
257 | early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; | 261 | early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; |
258 | #endif | 262 | #endif |
259 | #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) | 263 | #ifdef CONFIG_X86_32 |
264 | early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; | ||
265 | #endif | ||
266 | #ifdef CONFIG_NUMA | ||
260 | early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; | 267 | early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; |
261 | #endif | 268 | #endif |
262 | 269 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 08776a953487..c2871d3c71b6 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <asm/mtrr.h> | 64 | #include <asm/mtrr.h> |
65 | #include <asm/mwait.h> | 65 | #include <asm/mwait.h> |
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | #include <asm/io_apic.h> | ||
67 | #include <asm/setup.h> | 68 | #include <asm/setup.h> |
68 | #include <asm/uv/uv.h> | 69 | #include <asm/uv/uv.h> |
69 | #include <linux/mc146818rtc.h> | 70 | #include <linux/mc146818rtc.h> |
@@ -71,10 +72,6 @@ | |||
71 | #include <asm/smpboot_hooks.h> | 72 | #include <asm/smpboot_hooks.h> |
72 | #include <asm/i8259.h> | 73 | #include <asm/i8259.h> |
73 | 74 | ||
74 | #ifdef CONFIG_X86_32 | ||
75 | u8 apicid_2_node[MAX_APICID]; | ||
76 | #endif | ||
77 | |||
78 | /* State of each CPU */ | 75 | /* State of each CPU */ |
79 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 76 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
80 | 77 | ||
@@ -130,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | |||
130 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); | 127 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); |
131 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 128 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
132 | 129 | ||
130 | DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); | ||
131 | |||
133 | /* Per CPU bogomips and other parameters */ | 132 | /* Per CPU bogomips and other parameters */ |
134 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 133 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
135 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 134 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
136 | 135 | ||
137 | atomic_t init_deasserted; | 136 | atomic_t init_deasserted; |
138 | 137 | ||
139 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | ||
140 | /* which node each logical CPU is on */ | ||
141 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | ||
142 | EXPORT_SYMBOL(cpu_to_node_map); | ||
143 | |||
144 | /* set up a mapping between cpu and node. */ | ||
145 | static void map_cpu_to_node(int cpu, int node) | ||
146 | { | ||
147 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); | ||
148 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | ||
149 | cpu_to_node_map[cpu] = node; | ||
150 | } | ||
151 | |||
152 | /* undo a mapping between cpu and node. */ | ||
153 | static void unmap_cpu_to_node(int cpu) | ||
154 | { | ||
155 | int node; | ||
156 | |||
157 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); | ||
158 | for (node = 0; node < MAX_NUMNODES; node++) | ||
159 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); | ||
160 | cpu_to_node_map[cpu] = 0; | ||
161 | } | ||
162 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | ||
163 | #define map_cpu_to_node(cpu, node) ({}) | ||
164 | #define unmap_cpu_to_node(cpu) ({}) | ||
165 | #endif | ||
166 | |||
167 | #ifdef CONFIG_X86_32 | ||
168 | static int boot_cpu_logical_apicid; | ||
169 | |||
170 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = | ||
171 | { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
172 | |||
173 | static void map_cpu_to_logical_apicid(void) | ||
174 | { | ||
175 | int cpu = smp_processor_id(); | ||
176 | int apicid = logical_smp_processor_id(); | ||
177 | int node = apic->apicid_to_node(apicid); | ||
178 | |||
179 | if (!node_online(node)) | ||
180 | node = first_online_node; | ||
181 | |||
182 | cpu_2_logical_apicid[cpu] = apicid; | ||
183 | map_cpu_to_node(cpu, node); | ||
184 | } | ||
185 | |||
186 | void numa_remove_cpu(int cpu) | ||
187 | { | ||
188 | cpu_2_logical_apicid[cpu] = BAD_APICID; | ||
189 | unmap_cpu_to_node(cpu); | ||
190 | } | ||
191 | #else | ||
192 | #define map_cpu_to_logical_apicid() do {} while (0) | ||
193 | #endif | ||
194 | |||
195 | /* | 138 | /* |
196 | * Report back to the Boot Processor. | 139 | * Report back to the Boot Processor. |
197 | * Running on AP. | 140 | * Running on AP. |
@@ -259,7 +202,6 @@ static void __cpuinit smp_callin(void) | |||
259 | apic->smp_callin_clear_local_apic(); | 202 | apic->smp_callin_clear_local_apic(); |
260 | setup_local_APIC(); | 203 | setup_local_APIC(); |
261 | end_local_APIC_setup(); | 204 | end_local_APIC_setup(); |
262 | map_cpu_to_logical_apicid(); | ||
263 | 205 | ||
264 | /* | 206 | /* |
265 | * Need to setup vector mappings before we enable interrupts. | 207 | * Need to setup vector mappings before we enable interrupts. |
@@ -355,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
355 | cpu_idle(); | 297 | cpu_idle(); |
356 | } | 298 | } |
357 | 299 | ||
358 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
359 | /* In this case, llc_shared_map is a pointer to a cpumask. */ | ||
360 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
361 | const struct cpuinfo_x86 *src) | ||
362 | { | ||
363 | struct cpumask *llc = dst->llc_shared_map; | ||
364 | *dst = *src; | ||
365 | dst->llc_shared_map = llc; | ||
366 | } | ||
367 | #else | ||
368 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
369 | const struct cpuinfo_x86 *src) | ||
370 | { | ||
371 | *dst = *src; | ||
372 | } | ||
373 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
374 | |||
375 | /* | 300 | /* |
376 | * The bootstrap kernel entry code has set these up. Save them for | 301 | * The bootstrap kernel entry code has set these up. Save them for |
377 | * a given CPU | 302 | * a given CPU |
@@ -381,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
381 | { | 306 | { |
382 | struct cpuinfo_x86 *c = &cpu_data(id); | 307 | struct cpuinfo_x86 *c = &cpu_data(id); |
383 | 308 | ||
384 | copy_cpuinfo_x86(c, &boot_cpu_data); | 309 | *c = boot_cpu_data; |
385 | c->cpu_index = id; | 310 | c->cpu_index = id; |
386 | if (id != 0) | 311 | if (id != 0) |
387 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
@@ -389,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id) | |||
389 | 314 | ||
390 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
391 | { | 316 | { |
392 | struct cpuinfo_x86 *c1 = &cpu_data(cpu1); | ||
393 | struct cpuinfo_x86 *c2 = &cpu_data(cpu2); | ||
394 | |||
395 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
396 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); | 318 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); |
397 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); | 319 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); |
398 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
399 | cpumask_set_cpu(cpu1, c2->llc_shared_map); | 321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
400 | cpumask_set_cpu(cpu2, c1->llc_shared_map); | 322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
401 | } | 323 | } |
402 | 324 | ||
403 | 325 | ||
@@ -414,6 +336,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
414 | 336 | ||
415 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { | 337 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { |
416 | if (c->phys_proc_id == o->phys_proc_id && | 338 | if (c->phys_proc_id == o->phys_proc_id && |
339 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) && | ||
417 | c->compute_unit_id == o->compute_unit_id) | 340 | c->compute_unit_id == o->compute_unit_id) |
418 | link_thread_siblings(cpu, i); | 341 | link_thread_siblings(cpu, i); |
419 | } else if (c->phys_proc_id == o->phys_proc_id && | 342 | } else if (c->phys_proc_id == o->phys_proc_id && |
@@ -425,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
425 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 348 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
426 | } | 349 | } |
427 | 350 | ||
428 | cpumask_set_cpu(cpu, c->llc_shared_map); | 351 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
429 | 352 | ||
430 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { | 353 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { |
431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 354 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
@@ -436,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
436 | for_each_cpu(i, cpu_sibling_setup_mask) { | 359 | for_each_cpu(i, cpu_sibling_setup_mask) { |
437 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 360 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
438 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
439 | cpumask_set_cpu(i, c->llc_shared_map); | 362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
440 | cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map); | 363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
441 | } | 364 | } |
442 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
443 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
@@ -476,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
476 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) | 399 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) |
477 | return cpu_core_mask(cpu); | 400 | return cpu_core_mask(cpu); |
478 | else | 401 | else |
479 | return c->llc_shared_map; | 402 | return cpu_llc_shared_mask(cpu); |
480 | } | 403 | } |
481 | 404 | ||
482 | static void impress_friends(void) | 405 | static void impress_friends(void) |
@@ -788,7 +711,7 @@ do_rest: | |||
788 | stack_start = c_idle.idle->thread.sp; | 711 | stack_start = c_idle.idle->thread.sp; |
789 | 712 | ||
790 | /* start_ip had better be page-aligned! */ | 713 | /* start_ip had better be page-aligned! */ |
791 | start_ip = setup_trampoline(); | 714 | start_ip = trampoline_address(); |
792 | 715 | ||
793 | /* So we see what's up */ | 716 | /* So we see what's up */ |
794 | announce_cpu(cpu, apicid); | 717 | announce_cpu(cpu, apicid); |
@@ -798,6 +721,8 @@ do_rest: | |||
798 | * the targeted processor. | 721 | * the targeted processor. |
799 | */ | 722 | */ |
800 | 723 | ||
724 | printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip); | ||
725 | |||
801 | atomic_set(&init_deasserted, 0); | 726 | atomic_set(&init_deasserted, 0); |
802 | 727 | ||
803 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { | 728 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
@@ -851,8 +776,8 @@ do_rest: | |||
851 | pr_debug("CPU%d: has booted.\n", cpu); | 776 | pr_debug("CPU%d: has booted.\n", cpu); |
852 | else { | 777 | else { |
853 | boot_error = 1; | 778 | boot_error = 1; |
854 | if (*((volatile unsigned char *)trampoline_base) | 779 | if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) |
855 | == 0xA5) | 780 | == 0xA5A5A5A5) |
856 | /* trampoline started but...? */ | 781 | /* trampoline started but...? */ |
857 | pr_err("CPU%d: Stuck ??\n", cpu); | 782 | pr_err("CPU%d: Stuck ??\n", cpu); |
858 | else | 783 | else |
@@ -878,7 +803,7 @@ do_rest: | |||
878 | } | 803 | } |
879 | 804 | ||
880 | /* mark "stuck" area as not stuck */ | 805 | /* mark "stuck" area as not stuck */ |
881 | *((volatile unsigned long *)trampoline_base) = 0; | 806 | *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0; |
882 | 807 | ||
883 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { | 808 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
884 | /* | 809 | /* |
@@ -945,6 +870,14 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
945 | return 0; | 870 | return 0; |
946 | } | 871 | } |
947 | 872 | ||
873 | /** | ||
874 | * arch_disable_smp_support() - disables SMP support for x86 at runtime | ||
875 | */ | ||
876 | void arch_disable_smp_support(void) | ||
877 | { | ||
878 | disable_ioapic_support(); | ||
879 | } | ||
880 | |||
948 | /* | 881 | /* |
949 | * Fall back to non SMP mode after errors. | 882 | * Fall back to non SMP mode after errors. |
950 | * | 883 | * |
@@ -960,7 +893,6 @@ static __init void disable_smp(void) | |||
960 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); | 893 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
961 | else | 894 | else |
962 | physid_set_mask_of_physid(0, &phys_cpu_present_map); | 895 | physid_set_mask_of_physid(0, &phys_cpu_present_map); |
963 | map_cpu_to_logical_apicid(); | ||
964 | cpumask_set_cpu(0, cpu_sibling_mask(0)); | 896 | cpumask_set_cpu(0, cpu_sibling_mask(0)); |
965 | cpumask_set_cpu(0, cpu_core_mask(0)); | 897 | cpumask_set_cpu(0, cpu_core_mask(0)); |
966 | } | 898 | } |
@@ -1045,7 +977,7 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
1045 | "(tell your hw vendor)\n"); | 977 | "(tell your hw vendor)\n"); |
1046 | } | 978 | } |
1047 | smpboot_clear_io_apic(); | 979 | smpboot_clear_io_apic(); |
1048 | arch_disable_smp_support(); | 980 | disable_ioapic_support(); |
1049 | return -1; | 981 | return -1; |
1050 | } | 982 | } |
1051 | 983 | ||
@@ -1089,21 +1021,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1089 | 1021 | ||
1090 | preempt_disable(); | 1022 | preempt_disable(); |
1091 | smp_cpu_index_default(); | 1023 | smp_cpu_index_default(); |
1092 | memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info)); | 1024 | |
1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); | ||
1094 | mb(); | ||
1095 | /* | 1025 | /* |
1096 | * Setup boot CPU information | 1026 | * Setup boot CPU information |
1097 | */ | 1027 | */ |
1098 | smp_store_cpu_info(0); /* Final full version of the data */ | 1028 | smp_store_cpu_info(0); /* Final full version of the data */ |
1099 | #ifdef CONFIG_X86_32 | 1029 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
1100 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1030 | mb(); |
1101 | #endif | 1031 | |
1102 | current_thread_info()->cpu = 0; /* needed? */ | 1032 | current_thread_info()->cpu = 0; /* needed? */ |
1103 | for_each_possible_cpu(i) { | 1033 | for_each_possible_cpu(i) { |
1104 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); | 1034 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); |
1105 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | 1035 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); |
1106 | zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); | 1036 | zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); |
1107 | } | 1037 | } |
1108 | set_cpu_sibling_map(0); | 1038 | set_cpu_sibling_map(0); |
1109 | 1039 | ||
@@ -1139,8 +1069,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1139 | 1069 | ||
1140 | bsp_end_local_APIC_setup(); | 1070 | bsp_end_local_APIC_setup(); |
1141 | 1071 | ||
1142 | map_cpu_to_logical_apicid(); | ||
1143 | |||
1144 | if (apic->setup_portio_remap) | 1072 | if (apic->setup_portio_remap) |
1145 | apic->setup_portio_remap(); | 1073 | apic->setup_portio_remap(); |
1146 | 1074 | ||
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 938c8e10a19a..6515733a289d 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -73,7 +73,7 @@ static const struct stacktrace_ops save_stack_ops_nosched = { | |||
73 | */ | 73 | */ |
74 | void save_stack_trace(struct stack_trace *trace) | 74 | void save_stack_trace(struct stack_trace *trace) |
75 | { | 75 | { |
76 | dump_trace(current, NULL, NULL, &save_stack_ops, trace); | 76 | dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace); |
77 | if (trace->nr_entries < trace->max_entries) | 77 | if (trace->nr_entries < trace->max_entries) |
78 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 78 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
79 | } | 79 | } |
@@ -81,14 +81,14 @@ EXPORT_SYMBOL_GPL(save_stack_trace); | |||
81 | 81 | ||
82 | void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) | 82 | void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) |
83 | { | 83 | { |
84 | dump_trace(current, regs, NULL, &save_stack_ops, trace); | 84 | dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); |
85 | if (trace->nr_entries < trace->max_entries) | 85 | if (trace->nr_entries < trace->max_entries) |
86 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 86 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
87 | } | 87 | } |
88 | 88 | ||
89 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 89 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
90 | { | 90 | { |
91 | dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace); | 91 | dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace); |
92 | if (trace->nr_entries < trace->max_entries) | 92 | if (trace->nr_entries < trace->max_entries) |
93 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 93 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
94 | } | 94 | } |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 58de45ee08b6..7977f0cfe339 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -166,7 +166,7 @@ static void enable_step(struct task_struct *child, bool block) | |||
166 | * Make sure block stepping (BTF) is not enabled unless it should be. | 166 | * Make sure block stepping (BTF) is not enabled unless it should be. |
167 | * Note that we don't try to worry about any is_setting_trap_flag() | 167 | * Note that we don't try to worry about any is_setting_trap_flag() |
168 | * instructions after the first when using block stepping. | 168 | * instructions after the first when using block stepping. |
169 | * So noone should try to use debugger block stepping in a program | 169 | * So no one should try to use debugger block stepping in a program |
170 | * that uses user-mode single stepping itself. | 170 | * that uses user-mode single stepping itself. |
171 | */ | 171 | */ |
172 | if (enable_single_step(child) && block) { | 172 | if (enable_single_step(child) && block) { |
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index b35786dc9b8f..abce34d5c79d 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -340,3 +340,7 @@ ENTRY(sys_call_table) | |||
340 | .long sys_fanotify_init | 340 | .long sys_fanotify_init |
341 | .long sys_fanotify_mark | 341 | .long sys_fanotify_mark |
342 | .long sys_prlimit64 /* 340 */ | 342 | .long sys_prlimit64 /* 340 */ |
343 | .long sys_name_to_handle_at | ||
344 | .long sys_open_by_handle_at | ||
345 | .long sys_clock_adjtime | ||
346 | .long sys_syncfs | ||
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index 7e4515957a1c..8927486a4649 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c | |||
@@ -39,7 +39,7 @@ int __ref arch_register_cpu(int num) | |||
39 | /* | 39 | /* |
40 | * CPU0 cannot be offlined due to several | 40 | * CPU0 cannot be offlined due to several |
41 | * restrictions and assumptions in kernel. This basically | 41 | * restrictions and assumptions in kernel. This basically |
42 | * doesnt add a control file, one cannot attempt to offline | 42 | * doesn't add a control file, one cannot attempt to offline |
43 | * BSP. | 43 | * BSP. |
44 | * | 44 | * |
45 | * Also certain PCI quirks require not to enable hotplug control | 45 | * Also certain PCI quirks require not to enable hotplug control |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a375616d77f7..a91ae7709b49 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -2,39 +2,41 @@ | |||
2 | #include <linux/memblock.h> | 2 | #include <linux/memblock.h> |
3 | 3 | ||
4 | #include <asm/trampoline.h> | 4 | #include <asm/trampoline.h> |
5 | #include <asm/cacheflush.h> | ||
5 | #include <asm/pgtable.h> | 6 | #include <asm/pgtable.h> |
6 | 7 | ||
7 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) | 8 | unsigned char *x86_trampoline_base; |
8 | #define __trampinit | ||
9 | #define __trampinitdata | ||
10 | #else | ||
11 | #define __trampinit __cpuinit | ||
12 | #define __trampinitdata __cpuinitdata | ||
13 | #endif | ||
14 | 9 | ||
15 | /* ready for x86_64 and x86 */ | 10 | void __init setup_trampolines(void) |
16 | unsigned char *__trampinitdata trampoline_base; | ||
17 | |||
18 | void __init reserve_trampoline_memory(void) | ||
19 | { | 11 | { |
20 | phys_addr_t mem; | 12 | phys_addr_t mem; |
13 | size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); | ||
21 | 14 | ||
22 | /* Has to be in very low memory so we can execute real-mode AP code. */ | 15 | /* Has to be in very low memory so we can execute real-mode AP code. */ |
23 | mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); | 16 | mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); |
24 | if (mem == MEMBLOCK_ERROR) | 17 | if (mem == MEMBLOCK_ERROR) |
25 | panic("Cannot allocate trampoline\n"); | 18 | panic("Cannot allocate trampoline\n"); |
26 | 19 | ||
27 | trampoline_base = __va(mem); | 20 | x86_trampoline_base = __va(mem); |
28 | memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); | 21 | memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); |
22 | |||
23 | printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", | ||
24 | x86_trampoline_base, (unsigned long long)mem, size); | ||
25 | |||
26 | memcpy(x86_trampoline_base, x86_trampoline_start, size); | ||
29 | } | 27 | } |
30 | 28 | ||
31 | /* | 29 | /* |
32 | * Currently trivial. Write the real->protected mode | 30 | * setup_trampolines() gets called very early, to guarantee the |
33 | * bootstrap into the page concerned. The caller | 31 | * availability of low memory. This is before the proper kernel page |
34 | * has made sure it's suitably aligned. | 32 | * tables are set up, so we cannot set page permissions in that |
33 | * function. Thus, we use an arch_initcall instead. | ||
35 | */ | 34 | */ |
36 | unsigned long __trampinit setup_trampoline(void) | 35 | static int __init configure_trampolines(void) |
37 | { | 36 | { |
38 | memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); | 37 | size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); |
39 | return virt_to_phys(trampoline_base); | 38 | |
39 | set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT); | ||
40 | return 0; | ||
40 | } | 41 | } |
42 | arch_initcall(configure_trampolines); | ||
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 8508237e8e43..451c0a7ef7fd 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S | |||
@@ -32,9 +32,11 @@ | |||
32 | #include <asm/segment.h> | 32 | #include <asm/segment.h> |
33 | #include <asm/page_types.h> | 33 | #include <asm/page_types.h> |
34 | 34 | ||
35 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ | 35 | #ifdef CONFIG_SMP |
36 | __CPUINITRODATA | 36 | |
37 | .code16 | 37 | .section ".x86_trampoline","a" |
38 | .balign PAGE_SIZE | ||
39 | .code16 | ||
38 | 40 | ||
39 | ENTRY(trampoline_data) | 41 | ENTRY(trampoline_data) |
40 | r_base = . | 42 | r_base = . |
@@ -44,7 +46,7 @@ r_base = . | |||
44 | 46 | ||
45 | cli # We should be safe anyway | 47 | cli # We should be safe anyway |
46 | 48 | ||
47 | movl $0xA5A5A5A5, trampoline_data - r_base | 49 | movl $0xA5A5A5A5, trampoline_status - r_base |
48 | # write marker for master knows we're running | 50 | # write marker for master knows we're running |
49 | 51 | ||
50 | /* GDT tables in non default location kernel can be beyond 16MB and | 52 | /* GDT tables in non default location kernel can be beyond 16MB and |
@@ -72,5 +74,10 @@ boot_idt_descr: | |||
72 | .word 0 # idt limit = 0 | 74 | .word 0 # idt limit = 0 |
73 | .long 0 # idt base = 0L | 75 | .long 0 # idt base = 0L |
74 | 76 | ||
77 | ENTRY(trampoline_status) | ||
78 | .long 0 | ||
79 | |||
75 | .globl trampoline_end | 80 | .globl trampoline_end |
76 | trampoline_end: | 81 | trampoline_end: |
82 | |||
83 | #endif /* CONFIG_SMP */ | ||
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 075d130efcf9..09ff51799e96 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S | |||
@@ -32,13 +32,9 @@ | |||
32 | #include <asm/segment.h> | 32 | #include <asm/segment.h> |
33 | #include <asm/processor-flags.h> | 33 | #include <asm/processor-flags.h> |
34 | 34 | ||
35 | #ifdef CONFIG_ACPI_SLEEP | 35 | .section ".x86_trampoline","a" |
36 | .section .rodata, "a", @progbits | 36 | .balign PAGE_SIZE |
37 | #else | 37 | .code16 |
38 | /* We can free up the trampoline after bootup if cpu hotplug is not supported. */ | ||
39 | __CPUINITRODATA | ||
40 | #endif | ||
41 | .code16 | ||
42 | 38 | ||
43 | ENTRY(trampoline_data) | 39 | ENTRY(trampoline_data) |
44 | r_base = . | 40 | r_base = . |
@@ -50,7 +46,7 @@ r_base = . | |||
50 | mov %ax, %ss | 46 | mov %ax, %ss |
51 | 47 | ||
52 | 48 | ||
53 | movl $0xA5A5A5A5, trampoline_data - r_base | 49 | movl $0xA5A5A5A5, trampoline_status - r_base |
54 | # write marker for master knows we're running | 50 | # write marker for master knows we're running |
55 | 51 | ||
56 | # Setup stack | 52 | # Setup stack |
@@ -64,10 +60,13 @@ r_base = . | |||
64 | movzx %ax, %esi # Find the 32bit trampoline location | 60 | movzx %ax, %esi # Find the 32bit trampoline location |
65 | shll $4, %esi | 61 | shll $4, %esi |
66 | 62 | ||
67 | # Fixup the vectors | 63 | # Fixup the absolute vectors |
68 | addl %esi, startup_32_vector - r_base | 64 | leal (startup_32 - r_base)(%esi), %eax |
69 | addl %esi, startup_64_vector - r_base | 65 | movl %eax, startup_32_vector - r_base |
70 | addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer | 66 | leal (startup_64 - r_base)(%esi), %eax |
67 | movl %eax, startup_64_vector - r_base | ||
68 | leal (tgdt - r_base)(%esi), %eax | ||
69 | movl %eax, (tgdt + 2 - r_base) | ||
71 | 70 | ||
72 | /* | 71 | /* |
73 | * GDT tables in non default location kernel can be beyond 16MB and | 72 | * GDT tables in non default location kernel can be beyond 16MB and |
@@ -129,6 +128,7 @@ no_longmode: | |||
129 | jmp no_longmode | 128 | jmp no_longmode |
130 | #include "verify_cpu.S" | 129 | #include "verify_cpu.S" |
131 | 130 | ||
131 | .balign 4 | ||
132 | # Careful these need to be in the same 64K segment as the above; | 132 | # Careful these need to be in the same 64K segment as the above; |
133 | tidt: | 133 | tidt: |
134 | .word 0 # idt limit = 0 | 134 | .word 0 # idt limit = 0 |
@@ -156,6 +156,10 @@ startup_64_vector: | |||
156 | .long startup_64 - r_base | 156 | .long startup_64 - r_base |
157 | .word __KERNEL_CS, 0 | 157 | .word __KERNEL_CS, 0 |
158 | 158 | ||
159 | .balign 4 | ||
160 | ENTRY(trampoline_status) | ||
161 | .long 0 | ||
162 | |||
159 | trampoline_stack: | 163 | trampoline_stack: |
160 | .org 0x1000 | 164 | .org 0x1000 |
161 | trampoline_stack_end: | 165 | trampoline_stack_end: |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ffe5755caa8b..9335bf7dd2e7 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -427,7 +427,7 @@ unsigned long native_calibrate_tsc(void) | |||
427 | * the delta to the previous read. We keep track of the min | 427 | * the delta to the previous read. We keep track of the min |
428 | * and max values of that delta. The delta is mostly defined | 428 | * and max values of that delta. The delta is mostly defined |
429 | * by the IO time of the PIT access, so we can detect when a | 429 | * by the IO time of the PIT access, so we can detect when a |
430 | * SMI/SMM disturbance happend between the two reads. If the | 430 | * SMI/SMM disturbance happened between the two reads. If the |
431 | * maximum time is significantly larger than the minimum time, | 431 | * maximum time is significantly larger than the minimum time, |
432 | * then we discard the result and have another try. | 432 | * then we discard the result and have another try. |
433 | * | 433 | * |
@@ -900,7 +900,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); | |||
900 | * timer based, instead of loop based, we don't block the boot | 900 | * timer based, instead of loop based, we don't block the boot |
901 | * process while this longer calibration is done. | 901 | * process while this longer calibration is done. |
902 | * | 902 | * |
903 | * If there are any calibration anomolies (too many SMIs, etc), | 903 | * If there are any calibration anomalies (too many SMIs, etc), |
904 | * or the refined calibration is off by 1% of the fast early | 904 | * or the refined calibration is off by 1% of the fast early |
905 | * calibration, we throw out the new calibration and use the | 905 | * calibration, we throw out the new calibration and use the |
906 | * early calibration. | 906 | * early calibration. |
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 0edefc19a113..b9242bacbe59 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S | |||
@@ -18,7 +18,7 @@ | |||
18 | * This file is expected to run in 32bit code. Currently: | 18 | * This file is expected to run in 32bit code. Currently: |
19 | * | 19 | * |
20 | * arch/x86/boot/compressed/head_64.S: Boot cpu verification | 20 | * arch/x86/boot/compressed/head_64.S: Boot cpu verification |
21 | * arch/x86/kernel/trampoline_64.S: secondary processor verfication | 21 | * arch/x86/kernel/trampoline_64.S: secondary processor verification |
22 | * arch/x86/kernel/head_32.S: processor startup | 22 | * arch/x86/kernel/head_32.S: processor startup |
23 | * | 23 | * |
24 | * verify_cpu, returns the status of longmode and SSE in register %eax. | 24 | * verify_cpu, returns the status of longmode and SSE in register %eax. |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bf4700755184..624a2016198e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -105,6 +105,7 @@ SECTIONS | |||
105 | SCHED_TEXT | 105 | SCHED_TEXT |
106 | LOCK_TEXT | 106 | LOCK_TEXT |
107 | KPROBES_TEXT | 107 | KPROBES_TEXT |
108 | ENTRY_TEXT | ||
108 | IRQENTRY_TEXT | 109 | IRQENTRY_TEXT |
109 | *(.fixup) | 110 | *(.fixup) |
110 | *(.gnu.warning) | 111 | *(.gnu.warning) |
@@ -230,7 +231,7 @@ SECTIONS | |||
230 | * output PHDR, so the next output section - .init.text - should | 231 | * output PHDR, so the next output section - .init.text - should |
231 | * start another segment - init. | 232 | * start another segment - init. |
232 | */ | 233 | */ |
233 | PERCPU_VADDR(0, :percpu) | 234 | PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) |
234 | #endif | 235 | #endif |
235 | 236 | ||
236 | INIT_TEXT_SECTION(PAGE_SIZE) | 237 | INIT_TEXT_SECTION(PAGE_SIZE) |
@@ -240,6 +241,18 @@ SECTIONS | |||
240 | 241 | ||
241 | INIT_DATA_SECTION(16) | 242 | INIT_DATA_SECTION(16) |
242 | 243 | ||
244 | /* | ||
245 | * Code and data for a variety of lowlevel trampolines, to be | ||
246 | * copied into base memory (< 1 MiB) during initialization. | ||
247 | * Since it is copied early, the main copy can be discarded | ||
248 | * afterwards. | ||
249 | */ | ||
250 | .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) { | ||
251 | x86_trampoline_start = .; | ||
252 | *(.x86_trampoline) | ||
253 | x86_trampoline_end = .; | ||
254 | } | ||
255 | |||
243 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { | 256 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
244 | __x86_cpu_dev_start = .; | 257 | __x86_cpu_dev_start = .; |
245 | *(.x86_cpu_dev.init) | 258 | *(.x86_cpu_dev.init) |
@@ -291,6 +304,7 @@ SECTIONS | |||
291 | *(.iommu_table) | 304 | *(.iommu_table) |
292 | __iommu_table_end = .; | 305 | __iommu_table_end = .; |
293 | } | 306 | } |
307 | |||
294 | . = ALIGN(8); | 308 | . = ALIGN(8); |
295 | /* | 309 | /* |
296 | * .exit.text is discard at runtime, not link time, to deal with | 310 | * .exit.text is discard at runtime, not link time, to deal with |
@@ -305,7 +319,7 @@ SECTIONS | |||
305 | } | 319 | } |
306 | 320 | ||
307 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) | 321 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) |
308 | PERCPU(THREAD_SIZE) | 322 | PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE) |
309 | #endif | 323 | #endif |
310 | 324 | ||
311 | . = ALIGN(PAGE_SIZE); | 325 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 1b950d151e58..9796c2f3d074 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t); | |||
52 | EXPORT_SYMBOL(memset); | 52 | EXPORT_SYMBOL(memset); |
53 | EXPORT_SYMBOL(memcpy); | 53 | EXPORT_SYMBOL(memcpy); |
54 | EXPORT_SYMBOL(__memcpy); | 54 | EXPORT_SYMBOL(__memcpy); |
55 | EXPORT_SYMBOL(memmove); | ||
55 | 56 | ||
56 | EXPORT_SYMBOL(empty_zero_page); | 57 | EXPORT_SYMBOL(empty_zero_page); |
57 | #ifndef CONFIG_PARAVIRT | 58 | #ifndef CONFIG_PARAVIRT |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index ceb2911aa439..c11514e9128b 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -70,6 +70,7 @@ struct x86_init_ops x86_init __initdata = { | |||
70 | .setup_percpu_clockev = setup_boot_APIC_clock, | 70 | .setup_percpu_clockev = setup_boot_APIC_clock, |
71 | .tsc_pre_init = x86_init_noop, | 71 | .tsc_pre_init = x86_init_noop, |
72 | .timer_init = hpet_time_init, | 72 | .timer_init = hpet_time_init, |
73 | .wallclock_init = x86_init_noop, | ||
73 | }, | 74 | }, |
74 | 75 | ||
75 | .iommu = { | 76 | .iommu = { |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 547128546cc3..a3911343976b 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -53,7 +53,7 @@ void __sanitize_i387_state(struct task_struct *tsk) | |||
53 | 53 | ||
54 | /* | 54 | /* |
55 | * None of the feature bits are in init state. So nothing else | 55 | * None of the feature bits are in init state. So nothing else |
56 | * to do for us, as the memory layout is upto date. | 56 | * to do for us, as the memory layout is up to date. |
57 | */ | 57 | */ |
58 | if ((xstate_bv & pcntxt_mask) == pcntxt_mask) | 58 | if ((xstate_bv & pcntxt_mask) == pcntxt_mask) |
59 | return; | 59 | return; |