34 files changed, 208 insertions(+), 211 deletions(-)
diff --git a/Documentation/i386/IO-APIC.txt b/Documentation/i386/IO-APIC.txt
index 435e69e6e9aa..f95166645d29 100644
--- a/Documentation/i386/IO-APIC.txt
+++ b/Documentation/i386/IO-APIC.txt
@@ -1,12 +1,14 @@
1 | Most (all) Intel-MP compliant SMP boards have the so-called 'IO-APIC', | 1 | Most (all) Intel-MP compliant SMP boards have the so-called 'IO-APIC', |
2 | which is an enhanced interrupt controller, it enables us to route | 2 | which is an enhanced interrupt controller. It enables us to route |
3 | hardware interrupts to multiple CPUs, or to CPU groups. | 3 | hardware interrupts to multiple CPUs, or to CPU groups. Without an |
4 | IO-APIC, interrupts from hardware will be delivered only to the | ||
5 | CPU which boots the operating system (usually CPU#0). | ||
4 | 6 | ||
5 | Linux supports all variants of compliant SMP boards, including ones with | 7 | Linux supports all variants of compliant SMP boards, including ones with |
6 | multiple IO-APICs. (multiple IO-APICs are used in high-end servers to | 8 | multiple IO-APICs. Multiple IO-APICs are used in high-end servers to |
7 | distribute IRQ load further). | 9 | distribute IRQ load further. |
8 | 10 | ||
9 | There are (a few) known breakages in certain older boards, which bugs are | 11 | There are (a few) known breakages in certain older boards, such bugs are |
10 | usually worked around by the kernel. If your MP-compliant SMP board does | 12 | usually worked around by the kernel. If your MP-compliant SMP board does |
11 | not boot Linux, then consult the linux-smp mailing list archives first. | 13 | not boot Linux, then consult the linux-smp mailing list archives first. |
12 | 14 | ||
@@ -28,18 +30,18 @@ If your box boots fine with enabled IO-APIC IRQs, then your
28 | hell:~> | 30 | hell:~> |
29 | <---------------------------- | 31 | <---------------------------- |
30 | 32 | ||
31 | some interrupts are still listed as 'XT PIC', but this is not a problem, | 33 | Some interrupts are still listed as 'XT PIC', but this is not a problem; |
32 | none of those IRQ sources is performance-critical. | 34 | none of those IRQ sources is performance-critical. |
33 | 35 | ||
34 | 36 | ||
35 | in the unlikely case that your board does not create a working mp-table, | 37 | In the unlikely case that your board does not create a working mp-table, |
36 | you can use the pirq= boot parameter to 'hand-construct' IRQ entries. This | 38 | you can use the pirq= boot parameter to 'hand-construct' IRQ entries. This |
37 | is nontrivial though and cannot be automated. One sample /etc/lilo.conf | 39 | is non-trivial though and cannot be automated. One sample /etc/lilo.conf |
38 | entry: | 40 | entry: |
39 | 41 | ||
40 | append="pirq=15,11,10" | 42 | append="pirq=15,11,10" |
41 | 43 | ||
42 | the actual numbers depend on your system, on your PCI cards and on their | 44 | The actual numbers depend on your system, on your PCI cards and on their |
43 | PCI slot position. Usually PCI slots are 'daisy chained' before they are | 45 | PCI slot position. Usually PCI slots are 'daisy chained' before they are |
44 | connected to the PCI chipset IRQ routing facility (the incoming PIRQ1-4 | 46 | connected to the PCI chipset IRQ routing facility (the incoming PIRQ1-4 |
45 | lines): | 47 | lines): |
@@ -54,7 +56,7 @@ lines):
54 | PIRQ1 ----| |- `----| |- `----| |- `----| |--------| | | 56 | PIRQ1 ----| |- `----| |- `----| |- `----| |--------| | |
55 | `-' `-' `-' `-' `-' | 57 | `-' `-' `-' `-' `-' |
56 | 58 | ||
57 | every PCI card emits a PCI IRQ, which can be INTA,INTB,INTC,INTD: | 59 | Every PCI card emits a PCI IRQ, which can be INTA, INTB, INTC or INTD: |
58 | 60 | ||
59 | ,-. | 61 | ,-. |
60 | INTD--| | | 62 | INTD--| | |
@@ -95,21 +97,21 @@ card (IRQ11) in Slot3, and have Slot1 empty:
95 | [value '0' is a generic 'placeholder', reserved for empty (or non-IRQ emitting) | 97 | [value '0' is a generic 'placeholder', reserved for empty (or non-IRQ emitting) |
96 | slots.] | 98 | slots.] |
97 | 99 | ||
98 | generally, it's always possible to find out the correct pirq= settings, just | 100 | Generally, it's always possible to find out the correct pirq= settings, just |
99 | permute all IRQ numbers properly ... it will take some time though. An | 101 | permute all IRQ numbers properly ... it will take some time though. An |
100 | 'incorrect' pirq line will cause the booting process to hang, or a device | 102 | 'incorrect' pirq line will cause the booting process to hang, or a device |
101 | won't function properly (if it's inserted as eg. a module). | 103 | won't function properly (e.g. if it's inserted as a module). |
102 | 104 | ||
103 | If you have 2 PCI buses, then you can use up to 8 pirq values. Although such | 105 | If you have 2 PCI buses, then you can use up to 8 pirq values, although such |
104 | boards tend to have a good configuration. | 106 | boards tend to have a good configuration. |
105 | 107 | ||
106 | Be prepared that it might happen that you need some strange pirq line: | 108 | Be prepared that it might happen that you need some strange pirq line: |
107 | 109 | ||
108 | append="pirq=0,0,0,0,0,0,9,11" | 110 | append="pirq=0,0,0,0,0,0,9,11" |
109 | 111 | ||
110 | use smart try-and-err techniques to find out the correct pirq line ... | 112 | Use smart trial-and-error techniques to find out the correct pirq line ... |
111 | 113 | ||
112 | good luck and mail to linux-smp@vger.kernel.org or | 114 | Good luck and mail to linux-smp@vger.kernel.org or |
113 | linux-kernel@vger.kernel.org if you have any problems that are not covered | 115 | linux-kernel@vger.kernel.org if you have any problems that are not covered |
114 | by this document. | 116 | by this document. |
115 | 117 | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a4fc7fc21439..7278295f94d2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1056,8 +1056,6 @@ and is between 256 and 4096 characters. It is defined in the file
1056 | [SCSI] Maximum number of LUNs received. | 1056 | [SCSI] Maximum number of LUNs received. |
1057 | Should be between 1 and 16384. | 1057 | Should be between 1 and 16384. |
1058 | 1058 | ||
1059 | mca-pentium [BUGS=X86-32] | ||
1060 | |||
1061 | mcatest= [IA-64] | 1059 | mcatest= [IA-64] |
1062 | 1060 | ||
1063 | mce [X86-32] Machine Check Exception | 1061 | mce [X86-32] Machine Check Exception |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6845482f0093..1c6ce3536e4c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -176,7 +176,7 @@ define archhelp
176 | @echo ' *_defconfig - Select default config from arch/$(ARCH)/configs' | 176 | @echo ' *_defconfig - Select default config from arch/$(ARCH)/configs' |
177 | endef | 177 | endef |
178 | 178 | ||
179 | install: vdso_install | 179 | install: |
180 | $(Q)$(MAKE) $(build)=$(boot) install | 180 | $(Q)$(MAKE) $(build)=$(boot) install |
181 | 181 | ||
182 | vdso_install: | 182 | vdso_install: |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 864affc9a7b0..702eb39901ca 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -156,7 +156,7 @@ config IO_DELAY_TYPE_NONE
156 | 156 | ||
157 | choice | 157 | choice |
158 | prompt "IO delay type" | 158 | prompt "IO delay type" |
159 | default IO_DELAY_0XED | 159 | default IO_DELAY_0X80 |
160 | 160 | ||
161 | config IO_DELAY_0X80 | 161 | config IO_DELAY_0X80 |
162 | bool "port 0x80 based port-IO delay [recommended]" | 162 | bool "port 0x80 based port-IO delay [recommended]" |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 204af43535c5..f1e739a43d41 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -229,7 +229,7 @@ zdisk bzdisk: vmlinux
229 | fdimage fdimage144 fdimage288 isoimage: vmlinux | 229 | fdimage fdimage144 fdimage288 isoimage: vmlinux |
230 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ | 230 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ |
231 | 231 | ||
232 | install: vdso_install | 232 | install: |
233 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install | 233 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install |
234 | 234 | ||
235 | PHONY += vdso_install | 235 | PHONY += vdso_install |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 76ec0f8f138a..4eb5ce841106 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,7 +6,15 @@ extra-y := head_$(BITS).o init_task.o vmlinux.lds
6 | extra-$(CONFIG_X86_64) += head64.o | 6 | extra-$(CONFIG_X86_64) += head64.o |
7 | 7 | ||
8 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) | 8 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) |
9 | CFLAGS_vsyscall_64.o := $(PROFILING) -g0 | 9 | |
10 | # | ||
11 | # vsyscalls (which work on the user stack) should have | ||
12 | # no stack-protector checks: | ||
13 | # | ||
14 | nostackp := $(call cc-option, -fno-stack-protector) | ||
15 | CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) | ||
16 | CFLAGS_hpet.o := $(nostackp) | ||
17 | CFLAGS_tsc_64.o := $(nostackp) | ||
10 | 18 | ||
11 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o | 19 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o |
12 | obj-y += traps_$(BITS).o irq_$(BITS).o | 20 | obj-y += traps_$(BITS).o irq_$(BITS).o |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 9b95edcfc6ae..027e5c003b16 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -25,14 +25,6 @@ static int __init no_halt(char *s)
25 | 25 | ||
26 | __setup("no-hlt", no_halt); | 26 | __setup("no-hlt", no_halt); |
27 | 27 | ||
28 | static int __init mca_pentium(char *s) | ||
29 | { | ||
30 | mca_pentium_flag = 1; | ||
31 | return 1; | ||
32 | } | ||
33 | |||
34 | __setup("mca-pentium", mca_pentium); | ||
35 | |||
36 | static int __init no_387(char *s) | 28 | static int __init no_387(char *s) |
37 | { | 29 | { |
38 | boot_cpu_data.hard_math = 0; | 30 | boot_cpu_data.hard_math = 0; |
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 0c0eeb163d90..759e02bec070 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(efi);
54 | 54 | ||
55 | struct efi_memory_map memmap; | 55 | struct efi_memory_map memmap; |
56 | 56 | ||
57 | struct efi efi_phys __initdata; | 57 | static struct efi efi_phys __initdata; |
58 | static efi_system_table_t efi_systab __initdata; | 58 | static efi_system_table_t efi_systab __initdata; |
59 | 59 | ||
60 | static int __init setup_noefi(char *arg) | 60 | static int __init setup_noefi(char *arg) |
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index cb91f985b4a1..5d23d85624d4 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -28,6 +28,7 @@
28 | #include <asm/page.h> | 28 | #include <asm/page.h> |
29 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include <asm/efi.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * To make EFI call EFI runtime service in physical addressing mode we need | 34 | * To make EFI call EFI runtime service in physical addressing mode we need |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 824e21b80aad..4b87c32b639f 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -409,7 +409,7 @@ restore_nocheck_notrace:
409 | RESTORE_REGS | 409 | RESTORE_REGS |
410 | addl $4, %esp # skip orig_eax/error_code | 410 | addl $4, %esp # skip orig_eax/error_code |
411 | CFI_ADJUST_CFA_OFFSET -4 | 411 | CFI_ADJUST_CFA_OFFSET -4 |
412 | ENTRY(irq_return) | 412 | irq_return: |
413 | INTERRUPT_RETURN | 413 | INTERRUPT_RETURN |
414 | .section .fixup,"ax" | 414 | .section .fixup,"ax" |
415 | iret_exc: | 415 | iret_exc: |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6be39a387c5a..2ad9a1bc6a73 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -583,7 +583,7 @@ retint_restore_args: /* return to kernel space */
583 | restore_args: | 583 | restore_args: |
584 | RESTORE_ARGS 0,8,0 | 584 | RESTORE_ARGS 0,8,0 |
585 | 585 | ||
586 | ENTRY(irq_return) | 586 | irq_return: |
587 | INTERRUPT_RETURN | 587 | INTERRUPT_RETURN |
588 | 588 | ||
589 | .section __ex_table, "a" | 589 | .section __ex_table, "a" |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 74ef4a41f224..25eb98540a41 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -612,7 +612,7 @@ ENTRY(swapper_pg_pmd)
612 | ENTRY(swapper_pg_dir) | 612 | ENTRY(swapper_pg_dir) |
613 | .fill 1024,4,0 | 613 | .fill 1024,4,0 |
614 | #endif | 614 | #endif |
615 | ENTRY(swapper_pg_fixmap) | 615 | swapper_pg_fixmap: |
616 | .fill 1024,4,0 | 616 | .fill 1024,4,0 |
617 | ENTRY(empty_zero_page) | 617 | ENTRY(empty_zero_page) |
618 | .fill 4096,1,0 | 618 | .fill 4096,1,0 |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 53e5820d6054..eb415043a929 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -255,7 +255,7 @@ ENTRY(secondary_startup_64)
255 | lretq | 255 | lretq |
256 | 256 | ||
257 | /* SMP bootup changes these two */ | 257 | /* SMP bootup changes these two */ |
258 | __CPUINITDATA | 258 | __REFDATA |
259 | .align 8 | 259 | .align 8 |
260 | ENTRY(initial_code) | 260 | ENTRY(initial_code) |
261 | .quad x86_64_start_kernel | 261 | .quad x86_64_start_kernel |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 26719bd2c77c..763dfc407232 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -39,7 +39,7 @@
39 | #define HAVE_HWFP 1 | 39 | #define HAVE_HWFP 1 |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 42 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
43 | 43 | ||
44 | void mxcsr_feature_mask_init(void) | 44 | void mxcsr_feature_mask_init(void) |
45 | { | 45 | { |
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index 2d25b77102fe..fe631967d625 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -26,8 +26,6 @@
26 | * present in the majority of PC/AT boxes. | 26 | * present in the majority of PC/AT boxes. |
27 | * plus some generic x86 specific things if generic specifics makes | 27 | * plus some generic x86 specific things if generic specifics makes |
28 | * any sense at all. | 28 | * any sense at all. |
29 | * this file should become arch/i386/kernel/irq.c when the old irq.c | ||
30 | * moves to arch independent land | ||
31 | */ | 29 | */ |
32 | 30 | ||
33 | static int i8259A_auto_eoi; | 31 | static int i8259A_auto_eoi; |
@@ -362,23 +360,12 @@ void __init init_ISA_irqs (void)
362 | #endif | 360 | #endif |
363 | init_8259A(0); | 361 | init_8259A(0); |
364 | 362 | ||
365 | for (i = 0; i < NR_IRQS; i++) { | 363 | /* |
366 | irq_desc[i].status = IRQ_DISABLED; | 364 | * 16 old-style INTA-cycle interrupts: |
367 | irq_desc[i].action = NULL; | 365 | */ |
368 | irq_desc[i].depth = 1; | 366 | for (i = 0; i < 16; i++) { |
369 | 367 | set_irq_chip_and_handler_name(i, &i8259A_chip, | |
370 | if (i < 16) { | 368 | handle_level_irq, "XT"); |
371 | /* | ||
372 | * 16 old-style INTA-cycle interrupts: | ||
373 | */ | ||
374 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
375 | handle_level_irq, "XT"); | ||
376 | } else { | ||
377 | /* | ||
378 | * 'high' PCI IRQs filled in on demand | ||
379 | */ | ||
380 | irq_desc[i].chip = &no_irq_chip; | ||
381 | } | ||
382 | } | 369 | } |
383 | } | 370 | } |
384 | 371 | ||
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index bd49321034db..c706a3061553 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -13,7 +13,6 @@
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | 14 | ||
15 | int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE; | 15 | int io_delay_type __read_mostly = CONFIG_DEFAULT_IO_DELAY_TYPE; |
16 | EXPORT_SYMBOL_GPL(io_delay_type); | ||
17 | 16 | ||
18 | static int __initdata io_delay_override; | 17 | static int __initdata io_delay_override; |
19 | 18 | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index a99e764fd66a..34a591283f5d 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -581,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
581 | * When a retprobed function returns, this code saves registers and | 581 | * When a retprobed function returns, this code saves registers and |
582 | * calls trampoline_handler() runs, which calls the kretprobe's handler. | 582 | * calls trampoline_handler() runs, which calls the kretprobe's handler. |
583 | */ | 583 | */ |
584 | void __kprobes kretprobe_trampoline_holder(void) | 584 | static void __used __kprobes kretprobe_trampoline_holder(void) |
585 | { | 585 | { |
586 | asm volatile ( | 586 | asm volatile ( |
587 | ".global kretprobe_trampoline\n" | 587 | ".global kretprobe_trampoline\n" |
@@ -673,7 +673,7 @@ void __kprobes kretprobe_trampoline_holder(void)
673 | /* | 673 | /* |
674 | * Called from kretprobe_trampoline | 674 | * Called from kretprobe_trampoline |
675 | */ | 675 | */ |
676 | void * __kprobes trampoline_handler(struct pt_regs *regs) | 676 | static __used __kprobes void *trampoline_handler(struct pt_regs *regs) |
677 | { | 677 | { |
678 | struct kretprobe_instance *ri = NULL; | 678 | struct kretprobe_instance *ri = NULL; |
679 | struct hlist_head *head, empty_rp; | 679 | struct hlist_head *head, empty_rp; |
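
The kprobes change above makes the trampoline holder and its handler static. Both are reached only from inline assembly, so they are also tagged __used to keep the compiler from discarding what looks like unreferenced code. A minimal sketch of that pattern, written with the plain GCC attribute and hypothetical names (the kernel spells the attribute via its __used macro):

	/*
	 * Only the asm label is referenced elsewhere; the 'used' attribute
	 * keeps this otherwise-unreferenced static function in the object.
	 */
	static void __attribute__((__used__)) trampoline_holder(void)
	{
		asm volatile(".global my_trampoline\n"
			     "my_trampoline:\n"
			     "ret\n");
	}
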
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index edd413650b3b..6a0aa7038685 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -46,9 +46,6 @@ static unsigned int nmi_hz = HZ;
46 | 46 | ||
47 | static DEFINE_PER_CPU(short, wd_enabled); | 47 | static DEFINE_PER_CPU(short, wd_enabled); |
48 | 48 | ||
49 | /* local prototypes */ | ||
50 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); | ||
51 | |||
52 | static int endflag __initdata = 0; | 49 | static int endflag __initdata = 0; |
53 | 50 | ||
54 | #ifdef CONFIG_SMP | 51 | #ifdef CONFIG_SMP |
@@ -391,15 +388,6 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
391 | return rc; | 388 | return rc; |
392 | } | 389 | } |
393 | 390 | ||
394 | int do_nmi_callback(struct pt_regs * regs, int cpu) | ||
395 | { | ||
396 | #ifdef CONFIG_SYSCTL | ||
397 | if (unknown_nmi_panic) | ||
398 | return unknown_nmi_panic_callback(regs, cpu); | ||
399 | #endif | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | #ifdef CONFIG_SYSCTL | 391 | #ifdef CONFIG_SYSCTL |
404 | 392 | ||
405 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) | 393 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) |
@@ -453,6 +441,15 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
453 | 441 | ||
454 | #endif | 442 | #endif |
455 | 443 | ||
444 | int do_nmi_callback(struct pt_regs *regs, int cpu) | ||
445 | { | ||
446 | #ifdef CONFIG_SYSCTL | ||
447 | if (unknown_nmi_panic) | ||
448 | return unknown_nmi_panic_callback(regs, cpu); | ||
449 | #endif | ||
450 | return 0; | ||
451 | } | ||
452 | |||
456 | void __trigger_all_cpu_backtrace(void) | 453 | void __trigger_all_cpu_backtrace(void) |
457 | { | 454 | { |
458 | int i; | 455 | int i; |
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index fb99484d21cf..9a4fde74bee1 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -46,9 +46,6 @@ static unsigned int nmi_hz = HZ;
46 | 46 | ||
47 | static DEFINE_PER_CPU(short, wd_enabled); | 47 | static DEFINE_PER_CPU(short, wd_enabled); |
48 | 48 | ||
49 | /* local prototypes */ | ||
50 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); | ||
51 | |||
52 | /* Run after command line and cpu_init init, but before all other checks */ | 49 | /* Run after command line and cpu_init init, but before all other checks */ |
53 | void nmi_watchdog_default(void) | 50 | void nmi_watchdog_default(void) |
54 | { | 51 | { |
@@ -394,15 +391,6 @@ asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
394 | nmi_exit(); | 391 | nmi_exit(); |
395 | } | 392 | } |
396 | 393 | ||
397 | int do_nmi_callback(struct pt_regs * regs, int cpu) | ||
398 | { | ||
399 | #ifdef CONFIG_SYSCTL | ||
400 | if (unknown_nmi_panic) | ||
401 | return unknown_nmi_panic_callback(regs, cpu); | ||
402 | #endif | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | void stop_nmi(void) | 394 | void stop_nmi(void) |
407 | { | 395 | { |
408 | acpi_nmi_disable(); | 396 | acpi_nmi_disable(); |
@@ -464,6 +452,15 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
464 | 452 | ||
465 | #endif | 453 | #endif |
466 | 454 | ||
455 | int do_nmi_callback(struct pt_regs *regs, int cpu) | ||
456 | { | ||
457 | #ifdef CONFIG_SYSCTL | ||
458 | if (unknown_nmi_panic) | ||
459 | return unknown_nmi_panic_callback(regs, cpu); | ||
460 | #endif | ||
461 | return 0; | ||
462 | } | ||
463 | |||
467 | void __trigger_all_cpu_backtrace(void) | 464 | void __trigger_all_cpu_backtrace(void) |
468 | { | 465 | { |
469 | int i; | 466 | int i; |
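
In both nmi_32.c and nmi_64.c above, do_nmi_callback() is only moved, not changed: placing it after the CONFIG_SYSCTL block means unknown_nmi_panic_callback() is already defined at the point of use, which is what lets the removed 'local prototypes' forward declaration go away. The shape of that reordering, with illustrative names and the flag passed in as a parameter so the example stays self-contained:

	#ifdef CONFIG_SYSCTL
	static int unknown_nmi_cb(void)		/* helper defined first */
	{
		return 1;
	}
	#endif

	int nmi_callback(int unknown_nmi_panic)	/* caller defined second, */
	{					/* no forward prototype   */
	#ifdef CONFIG_SYSCTL
		if (unknown_nmi_panic)
			return unknown_nmi_cb();
	#endif
		return 0;
	}
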
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 691ab4cb167b..a1d7071a51c9 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -164,7 +164,6 @@ unsigned long mmu_cr4_features = X86_CR4_PAE;
164 | unsigned int machine_id; | 164 | unsigned int machine_id; |
165 | unsigned int machine_submodel_id; | 165 | unsigned int machine_submodel_id; |
166 | unsigned int BIOS_revision; | 166 | unsigned int BIOS_revision; |
167 | unsigned int mca_pentium_flag; | ||
168 | 167 | ||
169 | /* Boot loader ID as an integer, for the benefit of proc_dointvec */ | 168 | /* Boot loader ID as an integer, for the benefit of proc_dointvec */ |
170 | int bootloader_type; | 169 | int bootloader_type; |
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index c0d8208af12a..6fd804f07821 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -518,7 +518,7 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
518 | } | 518 | } |
519 | 519 | ||
520 | #ifdef CONFIG_NUMA | 520 | #ifdef CONFIG_NUMA |
521 | static int nearby_node(int apicid) | 521 | static int __cpuinit nearby_node(int apicid) |
522 | { | 522 | { |
523 | int i, node; | 523 | int i, node; |
524 | 524 | ||
@@ -791,7 +791,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
791 | return 1; | 791 | return 1; |
792 | } | 792 | } |
793 | 793 | ||
794 | static void srat_detect_node(void) | 794 | static void __cpuinit srat_detect_node(void) |
795 | { | 795 | { |
796 | #ifdef CONFIG_NUMA | 796 | #ifdef CONFIG_NUMA |
797 | unsigned node; | 797 | unsigned node; |
@@ -1046,7 +1046,7 @@ __setup("noclflush", setup_noclflush);
1046 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 1046 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
1047 | { | 1047 | { |
1048 | if (c->x86_model_id[0]) | 1048 | if (c->x86_model_id[0]) |
1049 | printk(KERN_INFO "%s", c->x86_model_id); | 1049 | printk(KERN_CONT "%s", c->x86_model_id); |
1050 | 1050 | ||
1051 | if (c->x86_mask || c->cpuid_level >= 0) | 1051 | if (c->x86_mask || c->cpuid_level >= 0) |
1052 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | 1052 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
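
The print_cpu_info() tweak above is a printk log-level fix: KERN_INFO starts a new log record, while KERN_CONT continues the record begun by the previous printk, so the model string keeps extending the line its caller started rather than opening a fresh one. Illustrative usage only, with made-up variables:

	#include <linux/kernel.h>

	static void print_model(const char *model_id, unsigned char stepping)
	{
		printk(KERN_INFO "CPU: ");		/* starts the record */
		printk(KERN_CONT "%s", model_id);	/* continues it      */
		printk(KERN_CONT " stepping %02x\n", stepping);
	}
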
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index a40051b71d9b..0fcc95a354f7 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -34,7 +34,7 @@
34 | static DEFINE_PER_CPU(struct x86_cpu, cpu_devices); | 34 | static DEFINE_PER_CPU(struct x86_cpu, cpu_devices); |
35 | 35 | ||
36 | #ifdef CONFIG_HOTPLUG_CPU | 36 | #ifdef CONFIG_HOTPLUG_CPU |
37 | int arch_register_cpu(int num) | 37 | int __ref arch_register_cpu(int num) |
38 | { | 38 | { |
39 | /* | 39 | /* |
40 | * CPU0 cannot be offlined due to several | 40 | * CPU0 cannot be offlined due to several |
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index f1148ac8abe3..2ffa9656fe7a 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -38,7 +38,7 @@ SECTIONS
38 | 38 | ||
39 | /* read-only */ | 39 | /* read-only */ |
40 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 40 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
41 | . = ALIGN(4096); /* not really needed, already page aligned */ | 41 | . = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */ |
42 | *(.text.page_aligned) | 42 | *(.text.page_aligned) |
43 | TEXT_TEXT | 43 | TEXT_TEXT |
44 | SCHED_TEXT | 44 | SCHED_TEXT |
@@ -70,21 +70,21 @@ SECTIONS
70 | RODATA | 70 | RODATA |
71 | 71 | ||
72 | /* writeable */ | 72 | /* writeable */ |
73 | . = ALIGN(4096); | 73 | . = ALIGN(PAGE_SIZE); |
74 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ | 74 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ |
75 | DATA_DATA | 75 | DATA_DATA |
76 | CONSTRUCTORS | 76 | CONSTRUCTORS |
77 | } :data | 77 | } :data |
78 | 78 | ||
79 | . = ALIGN(4096); | 79 | . = ALIGN(PAGE_SIZE); |
80 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { | 80 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { |
81 | __nosave_begin = .; | 81 | __nosave_begin = .; |
82 | *(.data.nosave) | 82 | *(.data.nosave) |
83 | . = ALIGN(4096); | 83 | . = ALIGN(PAGE_SIZE); |
84 | __nosave_end = .; | 84 | __nosave_end = .; |
85 | } | 85 | } |
86 | 86 | ||
87 | . = ALIGN(4096); | 87 | . = ALIGN(PAGE_SIZE); |
88 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { | 88 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { |
89 | *(.data.page_aligned) | 89 | *(.data.page_aligned) |
90 | *(.data.idt) | 90 | *(.data.idt) |
@@ -108,7 +108,7 @@ SECTIONS
108 | } | 108 | } |
109 | 109 | ||
110 | /* might get freed after init */ | 110 | /* might get freed after init */ |
111 | . = ALIGN(4096); | 111 | . = ALIGN(PAGE_SIZE); |
112 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | 112 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { |
113 | __smp_locks = .; | 113 | __smp_locks = .; |
114 | *(.smp_locks) | 114 | *(.smp_locks) |
@@ -120,10 +120,10 @@ SECTIONS
120 | * after boot. Always make sure that ALIGN() directive is present after | 120 | * after boot. Always make sure that ALIGN() directive is present after |
121 | * the section which contains __smp_alt_end. | 121 | * the section which contains __smp_alt_end. |
122 | */ | 122 | */ |
123 | . = ALIGN(4096); | 123 | . = ALIGN(PAGE_SIZE); |
124 | 124 | ||
125 | /* will be freed after init */ | 125 | /* will be freed after init */ |
126 | . = ALIGN(4096); /* Init code and data */ | 126 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
127 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { | 127 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { |
128 | __init_begin = .; | 128 | __init_begin = .; |
129 | _sinittext = .; | 129 | _sinittext = .; |
@@ -174,23 +174,23 @@ SECTIONS
174 | EXIT_DATA | 174 | EXIT_DATA |
175 | } | 175 | } |
176 | #if defined(CONFIG_BLK_DEV_INITRD) | 176 | #if defined(CONFIG_BLK_DEV_INITRD) |
177 | . = ALIGN(4096); | 177 | . = ALIGN(PAGE_SIZE); |
178 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { | 178 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { |
179 | __initramfs_start = .; | 179 | __initramfs_start = .; |
180 | *(.init.ramfs) | 180 | *(.init.ramfs) |
181 | __initramfs_end = .; | 181 | __initramfs_end = .; |
182 | } | 182 | } |
183 | #endif | 183 | #endif |
184 | . = ALIGN(4096); | 184 | . = ALIGN(PAGE_SIZE); |
185 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { | 185 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { |
186 | __per_cpu_start = .; | 186 | __per_cpu_start = .; |
187 | *(.data.percpu) | 187 | *(.data.percpu) |
188 | *(.data.percpu.shared_aligned) | 188 | *(.data.percpu.shared_aligned) |
189 | __per_cpu_end = .; | 189 | __per_cpu_end = .; |
190 | } | 190 | } |
191 | . = ALIGN(4096); | 191 | . = ALIGN(PAGE_SIZE); |
192 | /* freed after init ends here */ | 192 | /* freed after init ends here */ |
193 | 193 | ||
194 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { | 194 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { |
195 | __init_end = .; | 195 | __init_end = .; |
196 | __bss_start = .; /* BSS */ | 196 | __bss_start = .; /* BSS */ |
@@ -200,7 +200,7 @@ SECTIONS
200 | __bss_stop = .; | 200 | __bss_stop = .; |
201 | _end = . ; | 201 | _end = . ; |
202 | /* This is where the kernel creates the early boot page tables */ | 202 | /* This is where the kernel creates the early boot page tables */ |
203 | . = ALIGN(4096); | 203 | . = ALIGN(PAGE_SIZE); |
204 | pg0 = . ; | 204 | pg0 = . ; |
205 | } | 205 | } |
206 | 206 | ||
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 0992b9946c6f..fab132299735 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -37,7 +37,7 @@ SECTIONS
37 | KPROBES_TEXT | 37 | KPROBES_TEXT |
38 | *(.fixup) | 38 | *(.fixup) |
39 | *(.gnu.warning) | 39 | *(.gnu.warning) |
40 | _etext = .; /* End of text section */ | 40 | _etext = .; /* End of text section */ |
41 | } :text = 0x9090 | 41 | } :text = 0x9090 |
42 | 42 | ||
43 | . = ALIGN(16); /* Exception table */ | 43 | . = ALIGN(16); /* Exception table */ |
@@ -60,7 +60,7 @@ SECTIONS
60 | __tracedata_end = .; | 60 | __tracedata_end = .; |
61 | } | 61 | } |
62 | 62 | ||
63 | . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ | 63 | . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ |
64 | /* Data */ | 64 | /* Data */ |
65 | .data : AT(ADDR(.data) - LOAD_OFFSET) { | 65 | .data : AT(ADDR(.data) - LOAD_OFFSET) { |
66 | DATA_DATA | 66 | DATA_DATA |
@@ -119,7 +119,7 @@ SECTIONS
119 | .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) | 119 | .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) |
120 | { *(.vsyscall_3) } | 120 | { *(.vsyscall_3) } |
121 | 121 | ||
122 | . = VSYSCALL_VIRT_ADDR + 4096; | 122 | . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; |
123 | 123 | ||
124 | #undef VSYSCALL_ADDR | 124 | #undef VSYSCALL_ADDR |
125 | #undef VSYSCALL_PHYS_ADDR | 125 | #undef VSYSCALL_PHYS_ADDR |
@@ -129,28 +129,28 @@ SECTIONS
129 | #undef VVIRT_OFFSET | 129 | #undef VVIRT_OFFSET |
130 | #undef VVIRT | 130 | #undef VVIRT |
131 | 131 | ||
132 | . = ALIGN(8192); /* init_task */ | 132 | . = ALIGN(THREAD_SIZE); /* init_task */ |
133 | .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { | 133 | .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { |
134 | *(.data.init_task) | 134 | *(.data.init_task) |
135 | }:data.init | 135 | }:data.init |
136 | 136 | ||
137 | . = ALIGN(4096); | 137 | . = ALIGN(PAGE_SIZE); |
138 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { | 138 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { |
139 | *(.data.page_aligned) | 139 | *(.data.page_aligned) |
140 | } | 140 | } |
141 | 141 | ||
142 | /* might get freed after init */ | 142 | /* might get freed after init */ |
143 | . = ALIGN(4096); | 143 | . = ALIGN(PAGE_SIZE); |
144 | __smp_alt_begin = .; | 144 | __smp_alt_begin = .; |
145 | __smp_locks = .; | 145 | __smp_locks = .; |
146 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | 146 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { |
147 | *(.smp_locks) | 147 | *(.smp_locks) |
148 | } | 148 | } |
149 | __smp_locks_end = .; | 149 | __smp_locks_end = .; |
150 | . = ALIGN(4096); | 150 | . = ALIGN(PAGE_SIZE); |
151 | __smp_alt_end = .; | 151 | __smp_alt_end = .; |
152 | 152 | ||
153 | . = ALIGN(4096); /* Init code and data */ | 153 | . = ALIGN(PAGE_SIZE); /* Init code and data */ |
154 | __init_begin = .; | 154 | __init_begin = .; |
155 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { | 155 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { |
156 | _sinittext = .; | 156 | _sinittext = .; |
@@ -191,7 +191,7 @@ SECTIONS
191 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { | 191 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { |
192 | *(.altinstructions) | 192 | *(.altinstructions) |
193 | } | 193 | } |
194 | __alt_instructions_end = .; | 194 | __alt_instructions_end = .; |
195 | .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { | 195 | .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { |
196 | *(.altinstr_replacement) | 196 | *(.altinstr_replacement) |
197 | } | 197 | } |
@@ -207,25 +207,25 @@ SECTIONS
207 | /* vdso blob that is mapped into user space */ | 207 | /* vdso blob that is mapped into user space */ |
208 | vdso_start = . ; | 208 | vdso_start = . ; |
209 | .vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) } | 209 | .vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) } |
210 | . = ALIGN(4096); | 210 | . = ALIGN(PAGE_SIZE); |
211 | vdso_end = .; | 211 | vdso_end = .; |
212 | 212 | ||
213 | #ifdef CONFIG_BLK_DEV_INITRD | 213 | #ifdef CONFIG_BLK_DEV_INITRD |
214 | . = ALIGN(4096); | 214 | . = ALIGN(PAGE_SIZE); |
215 | __initramfs_start = .; | 215 | __initramfs_start = .; |
216 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } | 216 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } |
217 | __initramfs_end = .; | 217 | __initramfs_end = .; |
218 | #endif | 218 | #endif |
219 | 219 | ||
220 | PERCPU(4096) | 220 | PERCPU(PAGE_SIZE) |
221 | 221 | ||
222 | . = ALIGN(4096); | 222 | . = ALIGN(PAGE_SIZE); |
223 | __init_end = .; | 223 | __init_end = .; |
224 | 224 | ||
225 | . = ALIGN(4096); | 225 | . = ALIGN(PAGE_SIZE); |
226 | __nosave_begin = .; | 226 | __nosave_begin = .; |
227 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } | 227 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } |
228 | . = ALIGN(4096); | 228 | . = ALIGN(PAGE_SIZE); |
229 | __nosave_end = .; | 229 | __nosave_end = .; |
230 | 230 | ||
231 | __bss_start = .; /* BSS */ | 231 | __bss_start = .; /* BSS */ |
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index fd42a4a095fc..459b58a8a15c 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -1,117 +1,129 @@
1 | /* Copyright 2002,2003 Andi Kleen, SuSE Labs. | 1 | /* |
2 | * Copyright 2002, 2003 Andi Kleen, SuSE Labs. | ||
2 | * Subject to the GNU Public License v.2 | 3 | * Subject to the GNU Public License v.2 |
3 | * | 4 | * |
4 | * Wrappers of assembly checksum functions for x86-64. | 5 | * Wrappers of assembly checksum functions for x86-64. |
5 | */ | 6 | */ |
6 | |||
7 | #include <asm/checksum.h> | 7 | #include <asm/checksum.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | /** | 10 | /** |
11 | * csum_partial_copy_from_user - Copy and checksum from user space. | 11 | * csum_partial_copy_from_user - Copy and checksum from user space. |
12 | * @src: source address (user space) | 12 | * @src: source address (user space) |
13 | * @dst: destination address | 13 | * @dst: destination address |
14 | * @len: number of bytes to be copied. | 14 | * @len: number of bytes to be copied. |
15 | * @isum: initial sum that is added into the result (32bit unfolded) | 15 | * @isum: initial sum that is added into the result (32bit unfolded) |
16 | * @errp: set to -EFAULT for an bad source address. | 16 | * @errp: set to -EFAULT for an bad source address. |
17 | * | 17 | * |
18 | * Returns an 32bit unfolded checksum of the buffer. | 18 | * Returns an 32bit unfolded checksum of the buffer. |
19 | * src and dst are best aligned to 64bits. | 19 | * src and dst are best aligned to 64bits. |
20 | */ | 20 | */ |
21 | __wsum | 21 | __wsum |
22 | csum_partial_copy_from_user(const void __user *src, void *dst, | 22 | csum_partial_copy_from_user(const void __user *src, void *dst, |
23 | int len, __wsum isum, int *errp) | 23 | int len, __wsum isum, int *errp) |
24 | { | 24 | { |
25 | might_sleep(); | 25 | might_sleep(); |
26 | *errp = 0; | 26 | *errp = 0; |
27 | if (likely(access_ok(VERIFY_READ,src, len))) { | 27 | |
28 | /* Why 6, not 7? To handle odd addresses aligned we | 28 | if (!likely(access_ok(VERIFY_READ, src, len))) |
29 | would need to do considerable complications to fix the | 29 | goto out_err; |
30 | checksum which is defined as an 16bit accumulator. The | 30 | |
31 | fix alignment code is primarily for performance | 31 | /* |
32 | compatibility with 32bit and that will handle odd | 32 | * Why 6, not 7? To handle odd addresses aligned we |
33 | addresses slowly too. */ | 33 | * would need to do considerable complications to fix the |
34 | if (unlikely((unsigned long)src & 6)) { | 34 | * checksum which is defined as an 16bit accumulator. The |
35 | while (((unsigned long)src & 6) && len >= 2) { | 35 | * fix alignment code is primarily for performance |
36 | __u16 val16; | 36 | * compatibility with 32bit and that will handle odd |
37 | *errp = __get_user(val16, (const __u16 __user *)src); | 37 | * addresses slowly too. |
38 | if (*errp) | 38 | */ |
39 | return isum; | 39 | if (unlikely((unsigned long)src & 6)) { |
40 | *(__u16 *)dst = val16; | 40 | while (((unsigned long)src & 6) && len >= 2) { |
41 | isum = (__force __wsum)add32_with_carry( | 41 | __u16 val16; |
42 | (__force unsigned)isum, val16); | 42 | |
43 | src += 2; | 43 | *errp = __get_user(val16, (const __u16 __user *)src); |
44 | dst += 2; | 44 | if (*errp) |
45 | len -= 2; | 45 | return isum; |
46 | } | 46 | |
47 | *(__u16 *)dst = val16; | ||
48 | isum = (__force __wsum)add32_with_carry( | ||
49 | (__force unsigned)isum, val16); | ||
50 | src += 2; | ||
51 | dst += 2; | ||
52 | len -= 2; | ||
47 | } | 53 | } |
48 | isum = csum_partial_copy_generic((__force const void *)src, | 54 | } |
49 | dst, len, isum, errp, NULL); | 55 | isum = csum_partial_copy_generic((__force const void *)src, |
50 | if (likely(*errp == 0)) | 56 | dst, len, isum, errp, NULL); |
51 | return isum; | 57 | if (unlikely(*errp)) |
52 | } | 58 | goto out_err; |
59 | |||
60 | return isum; | ||
61 | |||
62 | out_err: | ||
53 | *errp = -EFAULT; | 63 | *errp = -EFAULT; |
54 | memset(dst,0,len); | 64 | memset(dst, 0, len); |
55 | return isum; | ||
56 | } | ||
57 | 65 | ||
66 | return isum; | ||
67 | } | ||
58 | EXPORT_SYMBOL(csum_partial_copy_from_user); | 68 | EXPORT_SYMBOL(csum_partial_copy_from_user); |
59 | 69 | ||
60 | /** | 70 | /** |
61 | * csum_partial_copy_to_user - Copy and checksum to user space. | 71 | * csum_partial_copy_to_user - Copy and checksum to user space. |
62 | * @src: source address | 72 | * @src: source address |
63 | * @dst: destination address (user space) | 73 | * @dst: destination address (user space) |
64 | * @len: number of bytes to be copied. | 74 | * @len: number of bytes to be copied. |
65 | * @isum: initial sum that is added into the result (32bit unfolded) | 75 | * @isum: initial sum that is added into the result (32bit unfolded) |
66 | * @errp: set to -EFAULT for an bad destination address. | 76 | * @errp: set to -EFAULT for an bad destination address. |
67 | * | 77 | * |
68 | * Returns an 32bit unfolded checksum of the buffer. | 78 | * Returns an 32bit unfolded checksum of the buffer. |
69 | * src and dst are best aligned to 64bits. | 79 | * src and dst are best aligned to 64bits. |
70 | */ | 80 | */ |
71 | __wsum | 81 | __wsum |
72 | csum_partial_copy_to_user(const void *src, void __user *dst, | 82 | csum_partial_copy_to_user(const void *src, void __user *dst, |
73 | int len, __wsum isum, int *errp) | 83 | int len, __wsum isum, int *errp) |
74 | { | 84 | { |
75 | might_sleep(); | 85 | might_sleep(); |
86 | |||
76 | if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { | 87 | if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { |
77 | *errp = -EFAULT; | 88 | *errp = -EFAULT; |
78 | return 0; | 89 | return 0; |
79 | } | 90 | } |
80 | 91 | ||
81 | if (unlikely((unsigned long)dst & 6)) { | 92 | if (unlikely((unsigned long)dst & 6)) { |
82 | while (((unsigned long)dst & 6) && len >= 2) { | 93 | while (((unsigned long)dst & 6) && len >= 2) { |
83 | __u16 val16 = *(__u16 *)src; | 94 | __u16 val16 = *(__u16 *)src; |
95 | |||
84 | isum = (__force __wsum)add32_with_carry( | 96 | isum = (__force __wsum)add32_with_carry( |
85 | (__force unsigned)isum, val16); | 97 | (__force unsigned)isum, val16); |
86 | *errp = __put_user(val16, (__u16 __user *)dst); | 98 | *errp = __put_user(val16, (__u16 __user *)dst); |
87 | if (*errp) | 99 | if (*errp) |
88 | return isum; | 100 | return isum; |
89 | src += 2; | 101 | src += 2; |
90 | dst += 2; | 102 | dst += 2; |
91 | len -= 2; | 103 | len -= 2; |
92 | } | 104 | } |
93 | } | 105 | } |
94 | 106 | ||
95 | *errp = 0; | 107 | *errp = 0; |
96 | return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp); | 108 | return csum_partial_copy_generic(src, (void __force *)dst, |
97 | } | 109 | len, isum, NULL, errp); |
98 | 110 | } | |
99 | EXPORT_SYMBOL(csum_partial_copy_to_user); | 111 | EXPORT_SYMBOL(csum_partial_copy_to_user); |
100 | 112 | ||
101 | /** | 113 | /** |
102 | * csum_partial_copy_nocheck - Copy and checksum. | 114 | * csum_partial_copy_nocheck - Copy and checksum. |
103 | * @src: source address | 115 | * @src: source address |
104 | * @dst: destination address | 116 | * @dst: destination address |
105 | * @len: number of bytes to be copied. | 117 | * @len: number of bytes to be copied. |
106 | * @isum: initial sum that is added into the result (32bit unfolded) | 118 | * @isum: initial sum that is added into the result (32bit unfolded) |
107 | * | 119 | * |
108 | * Returns an 32bit unfolded checksum of the buffer. | 120 | * Returns an 32bit unfolded checksum of the buffer. |
109 | */ | 121 | */ |
110 | __wsum | 122 | __wsum |
111 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | 123 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) |
112 | { | 124 | { |
113 | return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); | 125 | return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); |
114 | } | 126 | } |
115 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | 127 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |
116 | 128 | ||
117 | __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | 129 | __sum16 csum_ipv6_magic(const struct in6_addr *saddr, |
@@ -119,17 +131,20 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
119 | __u32 len, unsigned short proto, __wsum sum) | 131 | __u32 len, unsigned short proto, __wsum sum) |
120 | { | 132 | { |
121 | __u64 rest, sum64; | 133 | __u64 rest, sum64; |
122 | 134 | ||
123 | rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) + | 135 | rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) + |
124 | (__force __u64)sum; | 136 | (__force __u64)sum; |
125 | asm(" addq (%[saddr]),%[sum]\n" | ||
126 | " adcq 8(%[saddr]),%[sum]\n" | ||
127 | " adcq (%[daddr]),%[sum]\n" | ||
128 | " adcq 8(%[daddr]),%[sum]\n" | ||
129 | " adcq $0,%[sum]\n" | ||
130 | : [sum] "=r" (sum64) | ||
131 | : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr)); | ||
132 | return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); | ||
133 | } | ||
134 | 137 | ||
138 | asm(" addq (%[saddr]),%[sum]\n" | ||
139 | " adcq 8(%[saddr]),%[sum]\n" | ||
140 | " adcq (%[daddr]),%[sum]\n" | ||
141 | " adcq 8(%[daddr]),%[sum]\n" | ||
142 | " adcq $0,%[sum]\n" | ||
143 | |||
144 | : [sum] "=r" (sum64) | ||
145 | : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr)); | ||
146 | |||
147 | return csum_fold( | ||
148 | (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32)); | ||
149 | } | ||
135 | EXPORT_SYMBOL(csum_ipv6_magic); | 150 | EXPORT_SYMBOL(csum_ipv6_magic); |
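
Besides the whitespace and comment cleanups, csum_partial_copy_from_user() above is restructured around a single error exit: both the failed access_ok() check and a copy error now branch to one out_err label that zeroes the destination and reports -EFAULT. A stripped-down sketch of that error-path style; the helper functions here are hypothetical, not the kernel's:

	#include <errno.h>
	#include <string.h>

	int buffer_is_readable(const void *src, int len);	/* hypothetical */
	int checked_copy(void *dst, const void *src, int len);	/* hypothetical */

	static int copy_and_check(void *dst, const void *src, int len)
	{
		if (!buffer_is_readable(src, len))
			goto out_err;
		if (checked_copy(dst, src, len))
			goto out_err;

		return 0;

	out_err:
		memset(dst, 0, len);	/* never hand back stale destination bytes */
		return -EFAULT;
	}
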
diff --git a/arch/x86/lib/io_64.c b/arch/x86/lib/io_64.c
index 87b4a4e18039..3f1eb59b5f08 100644
--- a/arch/x86/lib/io_64.c
+++ b/arch/x86/lib/io_64.c
@@ -1,23 +1,25 @@
1 | #include <linux/string.h> | 1 | #include <linux/string.h> |
2 | #include <asm/io.h> | ||
3 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <asm/io.h> | ||
4 | 4 | ||
5 | void __memcpy_toio(unsigned long dst,const void*src,unsigned len) | 5 | void __memcpy_toio(unsigned long dst, const void *src, unsigned len) |
6 | { | 6 | { |
7 | __inline_memcpy((void *) dst,src,len); | 7 | __inline_memcpy((void *)dst, src, len); |
8 | } | 8 | } |
9 | EXPORT_SYMBOL(__memcpy_toio); | 9 | EXPORT_SYMBOL(__memcpy_toio); |
10 | 10 | ||
11 | void __memcpy_fromio(void *dst,unsigned long src,unsigned len) | 11 | void __memcpy_fromio(void *dst, unsigned long src, unsigned len) |
12 | { | 12 | { |
13 | __inline_memcpy(dst,(const void *) src,len); | 13 | __inline_memcpy(dst, (const void *)src, len); |
14 | } | 14 | } |
15 | EXPORT_SYMBOL(__memcpy_fromio); | 15 | EXPORT_SYMBOL(__memcpy_fromio); |
16 | 16 | ||
17 | void memset_io(volatile void __iomem *a, int b, size_t c) | 17 | void memset_io(volatile void __iomem *a, int b, size_t c) |
18 | { | 18 | { |
19 | /* XXX: memset can mangle the IO patterns quite a bit. | 19 | /* |
20 | perhaps it would be better to use a dumb one */ | 20 | * TODO: memset can mangle the IO patterns quite a bit. |
21 | memset((void *)a,b,c); | 21 | * perhaps it would be better to use a dumb one: |
22 | */ | ||
23 | memset((void *)a, b, c); | ||
22 | } | 24 | } |
23 | EXPORT_SYMBOL(memset_io); | 25 | EXPORT_SYMBOL(memset_io); |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 9f42d7e9c158..f4c95aec5acb 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -42,6 +42,22 @@ int page_is_ram(unsigned long pagenr)
42 | unsigned long addr, end; | 42 | unsigned long addr, end; |
43 | int i; | 43 | int i; |
44 | 44 | ||
45 | /* | ||
46 | * A special case is the first 4Kb of memory; | ||
47 | * This is a BIOS owned area, not kernel ram, but generally | ||
48 | * not listed as such in the E820 table. | ||
49 | */ | ||
50 | if (pagenr == 0) | ||
51 | return 0; | ||
52 | |||
53 | /* | ||
54 | * Second special case: Some BIOSen report the PC BIOS | ||
55 | * area (640->1Mb) as ram even though it is not. | ||
56 | */ | ||
57 | if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) && | ||
58 | pagenr < (BIOS_END >> PAGE_SHIFT)) | ||
59 | return 0; | ||
60 | |||
45 | for (i = 0; i < e820.nr_map; i++) { | 61 | for (i = 0; i < e820.nr_map; i++) { |
46 | /* | 62 | /* |
47 | * Not usable memory: | 63 | * Not usable memory: |
@@ -51,14 +67,6 @@ int page_is_ram(unsigned long pagenr)
51 | addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT; | 67 | addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT; |
52 | end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT; | 68 | end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT; |
53 | 69 | ||
54 | /* | ||
55 | * Sanity check: Some BIOSen report areas as RAM that | ||
56 | * are not. Notably the 640->1Mb area, which is the | ||
57 | * PCI BIOS area. | ||
58 | */ | ||
59 | if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) && | ||
60 | end < (BIOS_END >> PAGE_SHIFT)) | ||
61 | continue; | ||
62 | 70 | ||
63 | if ((pagenr >= addr) && (pagenr < end)) | 71 | if ((pagenr >= addr) && (pagenr < end)) |
64 | return 1; | 72 | return 1; |
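
The ioremap.c hunks above hoist two special cases to the front of page_is_ram(): page 0 is BIOS-owned even when the E820 map lists it as RAM, and the legacy 640K-1M BIOS window is never RAM (the removed in-loop check only skipped E820 entries that sat entirely inside that window). A simplified sketch of the resulting logic; the constants and the map type are illustrative stand-ins for the kernel's E820 definitions:

	#define PAGE_SHIFT	12
	#define BIOS_BEGIN	0x000a0000UL	/* 640K */
	#define BIOS_END	0x00100000UL	/* 1M   */

	struct ram_range { unsigned long start_pfn, end_pfn; };	/* usable RAM only */

	static int page_is_ram_sketch(unsigned long pagenr,
				      const struct ram_range *map, int nr_ranges)
	{
		int i;

		/* First 4K of memory: BIOS-owned, never treated as kernel RAM. */
		if (pagenr == 0)
			return 0;

		/* Legacy BIOS area between 640K and 1M is not RAM either. */
		if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
			return 0;

		for (i = 0; i < nr_ranges; i++)
			if (pagenr >= map[i].start_pfn && pagenr < map[i].end_pfn)
				return 1;

		return 0;
	}
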
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3ee14996c829..e2a74ea11a53 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -513,7 +513,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
513 | unsigned long address = cpa->vaddr; | 513 | unsigned long address = cpa->vaddr; |
514 | int do_split, err; | 514 | int do_split, err; |
515 | unsigned int level; | 515 | unsigned int level; |
516 | struct page *kpte_page; | ||
517 | pte_t *kpte, old_pte; | 516 | pte_t *kpte, old_pte; |
518 | 517 | ||
519 | repeat: | 518 | repeat: |
@@ -532,10 +531,6 @@ repeat:
532 | return -EINVAL; | 531 | return -EINVAL; |
533 | } | 532 | } |
534 | 533 | ||
535 | kpte_page = virt_to_page(kpte); | ||
536 | BUG_ON(PageLRU(kpte_page)); | ||
537 | BUG_ON(PageCompound(kpte_page)); | ||
538 | |||
539 | if (level == PG_LEVEL_4K) { | 534 | if (level == PG_LEVEL_4K) { |
540 | pte_t new_pte; | 535 | pte_t new_pte; |
541 | pgprot_t new_prot = pte_pgprot(old_pte); | 536 | pgprot_t new_prot = pte_pgprot(old_pte); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index ecd91ea8a8ae..845001c617cc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -166,7 +166,8 @@ static inline int save_add_info(void) {return 0;}
166 | * Both SPARSE and RESERVE need nodes_add information. | 166 | * Both SPARSE and RESERVE need nodes_add information. |
167 | * This code supports one contiguous hot add area per node. | 167 | * This code supports one contiguous hot add area per node. |
168 | */ | 168 | */ |
169 | static int reserve_hotadd(int node, unsigned long start, unsigned long end) | 169 | static int __init |
170 | reserve_hotadd(int node, unsigned long start, unsigned long end) | ||
170 | { | 171 | { |
171 | unsigned long s_pfn = start >> PAGE_SHIFT; | 172 | unsigned long s_pfn = start >> PAGE_SHIFT; |
172 | unsigned long e_pfn = end >> PAGE_SHIFT; | 173 | unsigned long e_pfn = end >> PAGE_SHIFT; |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index b7c67a187b6b..7b6e3bb9b28c 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -541,7 +541,7 @@ void pcibios_disable_device (struct pci_dev *dev)
541 | pcibios_disable_irq(dev); | 541 | pcibios_disable_irq(dev); |
542 | } | 542 | } |
543 | 543 | ||
544 | struct pci_bus *pci_scan_bus_with_sysdata(int busno) | 544 | struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno) |
545 | { | 545 | { |
546 | struct pci_bus *bus = NULL; | 546 | struct pci_bus *bus = NULL; |
547 | struct pci_sysdata *sd; | 547 | struct pci_sysdata *sd; |
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 1deb3244b99b..000415947d93 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -20,6 +20,7 @@
20 | #include <asm/segment.h> | 20 | #include <asm/segment.h> |
21 | #include <asm/page.h> | 21 | #include <asm/page.h> |
22 | #include <asm/asm-offsets.h> | 22 | #include <asm/asm-offsets.h> |
23 | #include <asm/processor-flags.h> | ||
23 | 24 | ||
24 | ENTRY(swsusp_arch_suspend) | 25 | ENTRY(swsusp_arch_suspend) |
25 | movq $saved_context, %rax | 26 | movq $saved_context, %rax |
@@ -60,7 +61,7 @@ ENTRY(restore_image)
60 | /* Flush TLB */ | 61 | /* Flush TLB */ |
61 | movq mmu_cr4_features(%rip), %rax | 62 | movq mmu_cr4_features(%rip), %rax |
62 | movq %rax, %rdx | 63 | movq %rax, %rdx |
63 | andq $~(1<<7), %rdx # PGE | 64 | andq $~(X86_CR4_PGE), %rdx |
64 | movq %rdx, %cr4; # turn off PGE | 65 | movq %rdx, %cr4; # turn off PGE |
65 | movq %cr3, %rcx; # flush TLB | 66 | movq %cr3, %rcx; # flush TLB |
66 | movq %rcx, %cr3; | 67 | movq %rcx, %cr3; |
@@ -112,7 +113,7 @@ ENTRY(restore_registers)
112 | /* Flush TLB, including "global" things (vmalloc) */ | 113 | /* Flush TLB, including "global" things (vmalloc) */ |
113 | movq mmu_cr4_features(%rip), %rax | 114 | movq mmu_cr4_features(%rip), %rax |
114 | movq %rax, %rdx | 115 | movq %rax, %rdx |
115 | andq $~(1<<7), %rdx; # PGE | 116 | andq $~(X86_CR4_PGE), %rdx |
116 | movq %rdx, %cr4; # turn off PGE | 117 | movq %rdx, %cr4; # turn off PGE |
117 | movq %cr3, %rcx; # flush TLB | 118 | movq %cr3, %rcx; # flush TLB |
118 | movq %rcx, %cr3 | 119 | movq %rcx, %cr3 |
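
The hibernate assembly above swaps the magic mask $~(1<<7) for $~(X86_CR4_PGE) from the newly included <asm/processor-flags.h>. CR4 bit 7 is Page Global Enable; clearing it invalidates the global TLB entries that a plain CR3 reload would otherwise leave in place, which is exactly what these restore paths need. Roughly the same operation expressed in C, as a sketch rather than the kernel's code:

	#define X86_CR4_PGE	(1UL << 7)	/* CR4.PGE: Page Global Enable */

	static unsigned long cr4_without_pge(unsigned long cr4)
	{
		/* Writing this value back to %cr4 drops global TLB entries. */
		return cr4 & ~X86_CR4_PGE;
	}
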
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 6b1895ccd6b7..f377b76b2f34 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -20,7 +20,6 @@
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | 21 | ||
22 | extern void fpu_init(void); | 22 | extern void fpu_init(void); |
23 | extern unsigned int mxcsr_feature_mask; | ||
24 | extern void mxcsr_feature_mask_init(void); | 23 | extern void mxcsr_feature_mask_init(void); |
25 | extern void init_fpu(struct task_struct *child); | 24 | extern void init_fpu(struct task_struct *child); |
26 | extern asmlinkage void math_state_restore(void); | 25 | extern asmlinkage void math_state_restore(void); |
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 7fd5e0e2361e..0a0b77bc736a 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -188,6 +188,7 @@ static inline unsigned long pmd_bad(pmd_t pmd)
188 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | 188 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) |
189 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) | 189 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) |
190 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) | 190 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) |
191 | static inline int pgd_large(pgd_t pgd) { return 0; } | ||
191 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) | 192 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) |
192 | 193 | ||
193 | /* PUD - Level3 access */ | 194 | /* PUD - Level3 access */ |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 149920dcd341..45a2f0ab33d0 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -302,10 +302,6 @@ union i387_union {
302 | }; | 302 | }; |
303 | 303 | ||
304 | #ifdef CONFIG_X86_32 | 304 | #ifdef CONFIG_X86_32 |
305 | /* | ||
306 | * the following now lives in the per cpu area: | ||
307 | * extern int cpu_llc_id[NR_CPUS]; | ||
308 | */ | ||
309 | DECLARE_PER_CPU(u8, cpu_llc_id); | 305 | DECLARE_PER_CPU(u8, cpu_llc_id); |
310 | #else | 306 | #else |
311 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | 307 | DECLARE_PER_CPU(struct orig_ist, orig_ist); |
@@ -671,7 +667,6 @@ extern void init_gdt(int cpu);
671 | extern unsigned int machine_id; | 667 | extern unsigned int machine_id; |
672 | extern unsigned int machine_submodel_id; | 668 | extern unsigned int machine_submodel_id; |
673 | extern unsigned int BIOS_revision; | 669 | extern unsigned int BIOS_revision; |
674 | extern unsigned int mca_pentium_flag; | ||
675 | 670 | ||
676 | /* Boot loader type from the setup header */ | 671 | /* Boot loader type from the setup header */ |
677 | extern int bootloader_type; | 672 | extern int bootloader_type; |