author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 19:38:03 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 19:38:03 -0400
commit     a7aed1c2dc4939d1d61285c738ad32700d791692 (patch)
tree       a64cda4c4dd29137a09f06a8c1d5db7cd20e7da5 /arch
parent     1212663fba7c5e003e05d24f043d5ed57eb18b24 (diff)
parent     1b82ba6e47c13ee369a4808f72d003499f8c7920 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
  x86: Add HPET force support for MCP55 (nForce 5) chipsets
  x86: Force enable HPET for CK804 (nForce 4) chipsets
  x86: clean up setup.h and the boot code
  x86: Save registers in saved_context during suspend and hibernation
  x86: merge setup_32/64.h
  x86: merge signal_32/64.h
  x86: merge required-features.h
  x86: merge sigcontext_32/64.h
  x86: merge msr_32/64.h
  x86: merge mttr_32/64.h
  x86: merge statfs_32/64.h
  x86: merge stat_32/64.h
  x86: merge shmbuf_32/64.h
  x86: merge ptrace_32/64.h
  x86: merge msgbuf_32/64.h
  x86: merge elf_32/64.h
  x86: merge byteorder_32/64.h
  x86: whitespace cleanup of mce_64.c
  x86: consolidate the cpu/ related code usage
  x86: prepare consolidation of cpu/ related code usage
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/Makefile | 6
-rw-r--r--  arch/x86/boot/boot.h | 2
-rw-r--r--  arch/x86/boot/compressed/relocs.c | 6
-rw-r--r--  arch/x86/boot/main.c | 2
-rw-r--r--  arch/x86/crypto/Makefile | 20
-rw-r--r--  arch/x86/crypto/Makefile_32 | 12
-rw-r--r--  arch/x86/crypto/Makefile_64 | 12
-rw-r--r--  arch/x86/kernel/Makefile_32 | 2
-rw-r--r--  arch/x86/kernel/Makefile_64 | 19
-rw-r--r--  arch/x86/kernel/acpi/Makefile | 10
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32 | 7
-rw-r--r--  arch/x86/kernel/acpi/Makefile_64 | 7
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 101
-rw-r--r--  arch/x86/kernel/apic_64.c | 14
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 28
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 24
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile | 8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c (renamed from arch/x86/kernel/cpu/mcheck/mce.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c (renamed from arch/x86/kernel/mce_64.c) | 164
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c (renamed from arch/x86/kernel/mce_amd_64.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c (renamed from arch/x86/kernel/mce_intel_64.c) | 0
-rw-r--r--  arch/x86/kernel/crash.c (renamed from arch/x86/kernel/crash_32.c) | 11
-rw-r--r--  arch/x86/kernel/crash_64.c | 135
-rw-r--r--  arch/x86/kernel/head_32.S | 7
-rw-r--r--  arch/x86/kernel/quirks.c | 55
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 2
-rw-r--r--  arch/x86/kernel/suspend_64.c | 6
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S | 72
-rw-r--r--  arch/x86/kernel/tsc_32.c | 39
-rw-r--r--  arch/x86/oprofile/Kconfig | 17
-rw-r--r--  arch/x86_64/Kconfig | 2
-rw-r--r--  arch/x86_64/Makefile | 6
33 files changed, 368 insertions, 430 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 5bed8be34ba5..b4437ce0f973 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1270,6 +1270,8 @@ source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
+source "kernel/Kconfig.instrumentation"
+
 source "arch/i386/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index b81cb64d48e5..f5b9a37def8b 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -20,6 +20,12 @@
 # Fill in SRCARCH
 SRCARCH := x86
 
+# BITS is used as extension for files which are available in a 32 bit
+# and a 64 bit version to simplify shared Makefiles.
+# e.g.: obj-y += foo_$(BITS).o
+BITS := 32
+export BITS
+
 HAS_BIARCH := $(call cc-option-yn, -m32)
 ifeq ($(HAS_BIARCH),y)
 AS := $(AS) --32
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 20bab9431acb..5f9a2e72a731 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>
 #include <linux/edd.h>
 #include <asm/boot.h>
-#include <asm/bootparam.h>
+#include <asm/setup.h>
 
 /* Useful macros */
 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 2d77ee728f92..7a0d00b2cf28 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -38,11 +38,9 @@ static const char* safe_abs_relocs[] = {
 
 static int is_safe_abs_reloc(const char* sym_name)
 {
- int i, array_size;
-
- array_size = sizeof(safe_abs_relocs)/sizeof(char*);
+ int i;
 
- for(i = 0; i < array_size; i++) {
+ for(i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
  if (!strcmp(sym_name, safe_abs_relocs[i]))
  /* Match found */
  return 1;
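The relocs.c change above swaps a hand-rolled sizeof division for the kernel's ARRAY_SIZE() macro, which derives the element count from the array itself. A minimal standalone sketch of the idiom (simplified macro and illustrative table entries, not the kernel's exact definitions):

    #include <stdio.h>

    /* Simplified form of the kernel macro; the real one also rejects plain pointers. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    /* Illustrative entries only; the real table lives in relocs.c. */
    static const char *safe_abs_relocs[] = {
            "__kernel_vsyscall",
            "__kernel_sigreturn",
    };

    int main(void)
    {
            size_t i;

            /* The count tracks the initializer automatically, so entries can be
             * added or removed without touching the loop bound. */
            for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++)
                    printf("%s\n", safe_abs_relocs[i]);
            return 0;
    }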
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 0eeef3989a17..1f95750ede28 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -26,8 +26,6 @@ char *heap_end = _end; /* Default end of heap = no heap */
  * screws up the old-style command line protocol, adjust by
  * filling in the new-style command line pointer instead.
  */
-#define OLD_CL_MAGIC 0xA33F
-#define OLD_CL_ADDRESS 0x20
 
 static void copy_boot_params(void)
 {
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 18dcdc6fb7aa..46bb609e2444 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -1,5 +1,15 @@
-ifeq ($(CONFIG_X86_32),y)
-include ${srctree}/arch/x86/crypto/Makefile_32
-else
-include ${srctree}/arch/x86/crypto/Makefile_64
-endif
+#
+# Arch-specific CryptoAPI modules.
+#
+
+obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
+
+obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
+
+aes-i586-y := aes-i586-asm_32.o aes_32.o
+twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
+
+aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
+twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
diff --git a/arch/x86/crypto/Makefile_32 b/arch/x86/crypto/Makefile_32
deleted file mode 100644
index 2d873a2388ed..000000000000
--- a/arch/x86/crypto/Makefile_32
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# x86/crypto/Makefile
-#
-# Arch-specific CryptoAPI modules.
-#
-
-obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
-obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
-
-aes-i586-y := aes-i586-asm_32.o aes_32.o
-twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
-
diff --git a/arch/x86/crypto/Makefile_64 b/arch/x86/crypto/Makefile_64
deleted file mode 100644
index b40896276e93..000000000000
--- a/arch/x86/crypto/Makefile_64
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# x86/crypto/Makefile
-#
-# Arch-specific CryptoAPI modules.
-#
-
-obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
-obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
-
-aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
-twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
-
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index ccea590bbb92..b9d679820306 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse_32.o
 obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o
 obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
-obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash_32.o
+obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o
 obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
 obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index dec06e769281..466337ae9a1e 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -9,25 +9,21 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
  x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
  setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
  pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
- perfctr-watchdog.o i8253.o
+ i8253.o
 
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o
-obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
-obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
-obj-$(CONFIG_MTRR) += cpu/mtrr/
-obj-$(CONFIG_ACPI) += acpi/
+obj-y += cpu/
+obj-y += acpi/
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_MICROCODE) += microcode.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o
 obj-y += apic_64.o nmi_64.o
 obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o
-obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash_64.o
+obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o
 obj-$(CONFIG_PM) += suspend_64.o
 obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o
-obj-$(CONFIG_CPU_FREQ) += cpu/cpufreq/
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o
 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
@@ -42,13 +38,6 @@ obj-$(CONFIG_MODULES) += module_64.o
 obj-$(CONFIG_PCI) += early-quirks.o
 
 obj-y += topology.o
-obj-y += intel_cacheinfo.o
-obj-y += addon_cpuid_features.o
 obj-y += pcspeaker.o
 
 CFLAGS_vsyscall_64.o := $(PROFILING) -g0
-
-therm_throt-y += cpu/mcheck/therm_throt.o
-intel_cacheinfo-y += cpu/intel_cacheinfo.o
-addon_cpuid_features-y += cpu/addon_cpuid_features.o
-perfctr-watchdog-y += cpu/perfctr-watchdog.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 3d5671939542..1351c3982ee4 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,5 +1,7 @@
-ifeq ($(CONFIG_X86_32),y)
-include ${srctree}/arch/x86/kernel/acpi/Makefile_32
-else
-include ${srctree}/arch/x86/kernel/acpi/Makefile_64
+obj-$(CONFIG_ACPI) += boot.o
+obj-$(CONFIG_ACPI_SLEEP) += sleep_$(BITS).o wakeup_$(BITS).o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += cstate.o processor.o
 endif
+
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
deleted file mode 100644
index 045dd54b33e0..000000000000
--- a/arch/x86/kernel/acpi/Makefile_32
+++ /dev/null
@@ -1,7 +0,0 @@
-obj-$(CONFIG_ACPI) += boot.o
-obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
-
-ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += cstate.o processor.o
-endif
-
diff --git a/arch/x86/kernel/acpi/Makefile_64 b/arch/x86/kernel/acpi/Makefile_64
deleted file mode 100644
index 629425bc002d..000000000000
--- a/arch/x86/kernel/acpi/Makefile_64
+++ /dev/null
@@ -1,7 +0,0 @@
-obj-y := boot.o
-obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o
-
-ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += processor.o cstate.o
-endif
-
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 55608ec2ed72..5ed3bc5c61d7 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -4,6 +4,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
+#include <asm/asm-offsets.h>
 
 # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
 #
@@ -342,31 +343,32 @@ do_suspend_lowlevel:
  xorl %eax, %eax
  call save_processor_state
 
- movq %rsp, saved_context_esp(%rip)
- movq %rax, saved_context_eax(%rip)
- movq %rbx, saved_context_ebx(%rip)
- movq %rcx, saved_context_ecx(%rip)
- movq %rdx, saved_context_edx(%rip)
- movq %rbp, saved_context_ebp(%rip)
- movq %rsi, saved_context_esi(%rip)
- movq %rdi, saved_context_edi(%rip)
- movq %r8, saved_context_r08(%rip)
- movq %r9, saved_context_r09(%rip)
- movq %r10, saved_context_r10(%rip)
- movq %r11, saved_context_r11(%rip)
- movq %r12, saved_context_r12(%rip)
- movq %r13, saved_context_r13(%rip)
- movq %r14, saved_context_r14(%rip)
- movq %r15, saved_context_r15(%rip)
- pushfq ; popq saved_context_eflags(%rip)
+ movq $saved_context, %rax
+ movq %rsp, pt_regs_rsp(%rax)
+ movq %rbp, pt_regs_rbp(%rax)
+ movq %rsi, pt_regs_rsi(%rax)
+ movq %rdi, pt_regs_rdi(%rax)
+ movq %rbx, pt_regs_rbx(%rax)
+ movq %rcx, pt_regs_rcx(%rax)
+ movq %rdx, pt_regs_rdx(%rax)
+ movq %r8, pt_regs_r8(%rax)
+ movq %r9, pt_regs_r9(%rax)
+ movq %r10, pt_regs_r10(%rax)
+ movq %r11, pt_regs_r11(%rax)
+ movq %r12, pt_regs_r12(%rax)
+ movq %r13, pt_regs_r13(%rax)
+ movq %r14, pt_regs_r14(%rax)
+ movq %r15, pt_regs_r15(%rax)
+ pushfq
+ popq pt_regs_eflags(%rax)
 
  movq $.L97, saved_rip(%rip)
 
- movq %rsp,saved_rsp
- movq %rbp,saved_rbp
- movq %rbx,saved_rbx
- movq %rdi,saved_rdi
- movq %rsi,saved_rsi
+ movq %rsp, saved_rsp
+ movq %rbp, saved_rbp
+ movq %rbx, saved_rbx
+ movq %rdi, saved_rdi
+ movq %rsi, saved_rsi
 
  addq $8, %rsp
  movl $3, %edi
@@ -377,32 +379,35 @@ do_suspend_lowlevel:
 .L99:
  .align 4
  movl $24, %eax
  movw %ax, %ds
- movq saved_context+58(%rip), %rax
- movq %rax, %cr4
- movq saved_context+50(%rip), %rax
- movq %rax, %cr3
- movq saved_context+42(%rip), %rax
- movq %rax, %cr2
- movq saved_context+34(%rip), %rax
- movq %rax, %cr0
- pushq saved_context_eflags(%rip) ; popfq
- movq saved_context_esp(%rip), %rsp
- movq saved_context_ebp(%rip), %rbp
- movq saved_context_eax(%rip), %rax
- movq saved_context_ebx(%rip), %rbx
- movq saved_context_ecx(%rip), %rcx
- movq saved_context_edx(%rip), %rdx
- movq saved_context_esi(%rip), %rsi
- movq saved_context_edi(%rip), %rdi
- movq saved_context_r08(%rip), %r8
- movq saved_context_r09(%rip), %r9
- movq saved_context_r10(%rip), %r10
- movq saved_context_r11(%rip), %r11
- movq saved_context_r12(%rip), %r12
- movq saved_context_r13(%rip), %r13
- movq saved_context_r14(%rip), %r14
- movq saved_context_r15(%rip), %r15
+
+ /* We don't restore %rax, it must be 0 anyway */
+ movq $saved_context, %rax
+ movq saved_context_cr4(%rax), %rbx
+ movq %rbx, %cr4
+ movq saved_context_cr3(%rax), %rbx
+ movq %rbx, %cr3
+ movq saved_context_cr2(%rax), %rbx
+ movq %rbx, %cr2
+ movq saved_context_cr0(%rax), %rbx
+ movq %rbx, %cr0
+ pushq pt_regs_eflags(%rax)
+ popfq
+ movq pt_regs_rsp(%rax), %rsp
+ movq pt_regs_rbp(%rax), %rbp
+ movq pt_regs_rsi(%rax), %rsi
+ movq pt_regs_rdi(%rax), %rdi
+ movq pt_regs_rbx(%rax), %rbx
+ movq pt_regs_rcx(%rax), %rcx
+ movq pt_regs_rdx(%rax), %rdx
+ movq pt_regs_r8(%rax), %r8
+ movq pt_regs_r9(%rax), %r9
+ movq pt_regs_r10(%rax), %r10
+ movq pt_regs_r11(%rax), %r11
+ movq pt_regs_r12(%rax), %r12
+ movq pt_regs_r13(%rax), %r13
+ movq pt_regs_r14(%rax), %r14
+ movq pt_regs_r15(%rax), %r15
 
  xorl %eax, %eax
  addq $8, %rsp
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index f47bc493dba9..f28ccb588fba 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -287,6 +287,20 @@ void disable_local_APIC(void)
  apic_write(APIC_SPIV, value);
 }
 
+void lapic_shutdown(void)
+{
+ unsigned long flags;
+
+ if (!cpu_has_apic)
+  return;
+
+ local_irq_save(flags);
+
+ disable_local_APIC();
+
+ local_irq_restore(flags);
+}
+
 /*
  * This is to verify that we're looking at a real local APIC.
  * Check these against your board if the CPUs aren't getting
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 778953bc636c..7e50bda565b4 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -76,6 +76,34 @@ int main(void)
  DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
  DEFINE(pbe_next, offsetof(struct pbe, next));
  BLANK();
+#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
+ ENTRY(rbx);
+ ENTRY(rbx);
+ ENTRY(rcx);
+ ENTRY(rdx);
+ ENTRY(rsp);
+ ENTRY(rbp);
+ ENTRY(rsi);
+ ENTRY(rdi);
+ ENTRY(r8);
+ ENTRY(r9);
+ ENTRY(r10);
+ ENTRY(r11);
+ ENTRY(r12);
+ ENTRY(r13);
+ ENTRY(r14);
+ ENTRY(r15);
+ ENTRY(eflags);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
+ ENTRY(cr0);
+ ENTRY(cr2);
+ ENTRY(cr3);
+ ENTRY(cr4);
+ ENTRY(cr8);
+ BLANK();
+#undef ENTRY
  DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
  BLANK();
  DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
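The block added to asm-offsets_64.c is what lets the assembly in wakeup_64.S and suspend_asm_64.S use symbolic pt_regs_* and saved_context_* offsets instead of hard-coded byte counts: a C file evaluates offsetof() for each field and the build turns the results into header constants. A rough sketch of the idea (stand-in struct and a plain printout; the real kbuild rule extracts the constants from compiler output rather than running a program):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for the kernel's struct pt_regs. */
    struct regs {
            unsigned long rbx;
            unsigned long rcx;
            unsigned long rsp;
    };

    /* Emit one assembler-visible constant per structure field. */
    #define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

    int main(void)
    {
            DEFINE(pt_regs_rbx, offsetof(struct regs, rbx));
            DEFINE(pt_regs_rcx, offsetof(struct regs, rcx));
            DEFINE(pt_regs_rsp, offsetof(struct regs, rsp));
            return 0;
    }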
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 778396c78d65..cfdb2f3bd763 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -2,19 +2,19 @@
 # Makefile for x86-compatible CPU details and quirks
 #
 
-obj-y := common.o proc.o bugs.o
+obj-y := intel_cacheinfo.o addon_cpuid_features.o
 
-obj-y += amd.o
-obj-y += cyrix.o
-obj-y += centaur.o
-obj-y += transmeta.o
-obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
-obj-y += nexgen.o
-obj-y += umc.o
+obj-$(CONFIG_X86_32) += common.o proc.o bugs.o
+obj-$(CONFIG_X86_32) += amd.o
+obj-$(CONFIG_X86_32) += cyrix.o
+obj-$(CONFIG_X86_32) += centaur.o
+obj-$(CONFIG_X86_32) += transmeta.o
+obj-$(CONFIG_X86_32) += intel.o
+obj-$(CONFIG_X86_32) += nexgen.o
+obj-$(CONFIG_X86_32) += umc.o
 
 obj-$(CONFIG_X86_MCE) += mcheck/
-
 obj-$(CONFIG_MTRR) += mtrr/
 obj-$(CONFIG_CPU_FREQ) += cpufreq/
 
 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index f1ebe1c1c17a..d7d2323bbb69 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,2 +1,6 @@
-obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o
-obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
+obj-y = mce_$(BITS).o therm_throt.o
+
+obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
+obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
+obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
+obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 34c781eddee4..34c781eddee4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 07bbfe7aa7f7..b9f802e35209 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -1,8 +1,8 @@
 /*
  * Machine check handler.
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
  */
 
 #include <linux/init.h>
@@ -23,7 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/uaccess.h>
@@ -63,10 +63,10 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
  * separate MCEs from kernel messages to avoid bogus bug reports.
  */
 
 struct mce_log mcelog = {
  MCE_LOG_SIGNATURE,
  MCE_LOG_LEN,
 };
 
 void mce_log(struct mce *mce)
 {
@@ -111,42 +111,42 @@ static void print_mce(struct mce *m)
111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
112 m->cpu, m->mcgstatus, m->bank, m->status); 112 m->cpu, m->mcgstatus, m->bank, m->status);
113 if (m->rip) { 113 if (m->rip) {
114 printk(KERN_EMERG 114 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
115 "RIP%s %02x:<%016Lx> ",
116 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
117 m->cs, m->rip); 116 m->cs, m->rip);
118 if (m->cs == __KERNEL_CS) 117 if (m->cs == __KERNEL_CS)
119 print_symbol("{%s}", m->rip); 118 print_symbol("{%s}", m->rip);
120 printk("\n"); 119 printk("\n");
121 } 120 }
122 printk(KERN_EMERG "TSC %Lx ", m->tsc); 121 printk(KERN_EMERG "TSC %Lx ", m->tsc);
123 if (m->addr) 122 if (m->addr)
124 printk("ADDR %Lx ", m->addr); 123 printk("ADDR %Lx ", m->addr);
125 if (m->misc) 124 if (m->misc)
126 printk("MISC %Lx ", m->misc); 125 printk("MISC %Lx ", m->misc);
127 printk("\n"); 126 printk("\n");
128 printk(KERN_EMERG "This is not a software problem!\n"); 127 printk(KERN_EMERG "This is not a software problem!\n");
129 printk(KERN_EMERG 128 printk(KERN_EMERG "Run through mcelog --ascii to decode "
130 "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 129 "and contact your hardware vendor\n");
131} 130}
132 131
133static void mce_panic(char *msg, struct mce *backup, unsigned long start) 132static void mce_panic(char *msg, struct mce *backup, unsigned long start)
134{ 133{
135 int i; 134 int i;
136 135
137 oops_begin(); 136 oops_begin();
138 for (i = 0; i < MCE_LOG_LEN; i++) { 137 for (i = 0; i < MCE_LOG_LEN; i++) {
139 unsigned long tsc = mcelog.entry[i].tsc; 138 unsigned long tsc = mcelog.entry[i].tsc;
139
140 if (time_before(tsc, start)) 140 if (time_before(tsc, start))
141 continue; 141 continue;
142 print_mce(&mcelog.entry[i]); 142 print_mce(&mcelog.entry[i]);
143 if (backup && mcelog.entry[i].tsc == backup->tsc) 143 if (backup && mcelog.entry[i].tsc == backup->tsc)
144 backup = NULL; 144 backup = NULL;
145 } 145 }
146 if (backup) 146 if (backup)
147 print_mce(backup); 147 print_mce(backup);
148 panic(msg); 148 panic(msg);
149} 149}
150 150
151static int mce_available(struct cpuinfo_x86 *c) 151static int mce_available(struct cpuinfo_x86 *c)
152{ 152{
@@ -170,10 +170,9 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
  }
 }
 
 /*
  * The actual machine check handler
  */
-
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
  struct mce m, panicm;
@@ -194,7 +193,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  atomic_inc(&mce_entry);
 
  if (regs)
- notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
+ notify_die(DIE_NMI, "machine check", regs, error_code, 18,
+  SIGKILL);
  if (!banks)
  goto out2;
 
@@ -204,15 +204,15 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  /* if the restart IP is not valid, we're done for */
  if (!(m.mcgstatus & MCG_STATUS_RIPV))
  no_way_out = 1;
 
  rdtscll(mcestart);
  barrier();
 
  for (i = 0; i < banks; i++) {
  if (!bank[i])
  continue;
 
  m.misc = 0;
  m.addr = 0;
  m.bank = i;
  m.tsc = 0;
@@ -372,7 +372,7 @@ static void mcheck_timer(struct work_struct *work)
  if (mce_notify_user()) {
  next_interval = max(next_interval/2, HZ/100);
  } else {
- next_interval = min(next_interval*2,
+ next_interval = min(next_interval * 2,
  (int)round_jiffies_relative(check_interval*HZ));
  }
 
@@ -423,18 +423,18 @@ static struct notifier_block mce_idle_notifier = {
 };
 
 static __init int periodic_mcheck_init(void)
 {
  next_interval = check_interval * HZ;
  if (next_interval)
  schedule_delayed_work(&mcheck_work,
  round_jiffies_relative(next_interval));
  idle_notifier_register(&mce_idle_notifier);
  return 0;
 }
 __initcall(periodic_mcheck_init);
 
 
 /*
  * Initialize Machine Checks for a CPU.
  */
 static void mce_init(void *dummy)
@@ -444,9 +444,9 @@ static void mce_init(void *dummy)
 
  rdmsrl(MSR_IA32_MCG_CAP, cap);
  banks = cap & 0xff;
  if (banks > NR_BANKS) {
  printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
  banks = NR_BANKS;
  }
  /* Use accurate RIP reporting if available. */
  if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
@@ -464,15 +464,15 @@ static void mce_init(void *dummy)
  for (i = 0; i < banks; i++) {
  wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
  wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
  }
 }
 
 /* Add per CPU specific workarounds here */
 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
  /* This should be disabled by the BIOS, but isn't always */
  if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
  /* disable GART TBL walk error reporting, which trips off
  incorrectly with the IOMMU & 3ware & Cerberus. */
  clear_bit(10, &bank[4]);
  /* Lots of broken BIOS around that don't clear them
@@ -480,7 +480,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
  mce_bootlog = 0;
  }
 
 }
 
 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
 {
@@ -496,15 +496,15 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  }
 }
 
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
  static cpumask_t mce_cpus = CPU_MASK_NONE;
 
  mce_cpu_quirks(c);
 
  if (mce_dont_init ||
  cpu_test_and_set(smp_processor_id(), mce_cpus) ||
@@ -553,13 +553,15 @@ static int mce_release(struct inode *inode, struct file *file)
  return 0;
 }
 
 static void collect_tscs(void *data)
 {
  unsigned long *cpu_tsc = (unsigned long *)data;
+
  rdtscll(cpu_tsc[smp_processor_id()]);
 }
 
-static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
+static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
+   loff_t *off)
 {
  unsigned long *cpu_tsc;
  static DECLARE_MUTEX(mce_read_sem);
@@ -571,19 +573,20 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
  if (!cpu_tsc)
  return -ENOMEM;
 
  down(&mce_read_sem);
  next = rcu_dereference(mcelog.next);
 
  /* Only supports full reads right now */
  if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
  up(&mce_read_sem);
  kfree(cpu_tsc);
  return -EINVAL;
  }
 
  err = 0;
  for (i = 0; i < next; i++) {
  unsigned long start = jiffies;
+
  while (!mcelog.entry[i].finished) {
  if (time_after_eq(jiffies, start + 2)) {
  memset(mcelog.entry + i,0, sizeof(struct mce));
@@ -593,31 +596,34 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
  }
  smp_rmb();
  err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
  buf += sizeof(struct mce);
  timeout:
  ;
  }
 
  memset(mcelog.entry, 0, next * sizeof(struct mce));
  mcelog.next = 0;
 
  synchronize_sched();
 
- /* Collect entries that were still getting written before the synchronize. */
-
+ /*
+  * Collect entries that were still getting written before the
+  * synchronize.
+  */
  on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
  for (i = next; i < MCE_LOG_LEN; i++) {
  if (mcelog.entry[i].finished &&
  mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
- err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
+ err |= copy_to_user(buf, mcelog.entry+i,
+     sizeof(struct mce));
  smp_rmb();
  buf += sizeof(struct mce);
  memset(&mcelog.entry[i], 0, sizeof(struct mce));
  }
  }
  up(&mce_read_sem);
  kfree(cpu_tsc);
  return err ? -EFAULT : buf - ubuf;
 }
 
 static unsigned int mce_poll(struct file *file, poll_table *wait)
@@ -628,26 +634,29 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
  return 0;
 }
 
-static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
+static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
+      unsigned long arg)
 {
  int __user *p = (int __user *)arg;
+
  if (!capable(CAP_SYS_ADMIN))
  return -EPERM;
  switch (cmd) {
  case MCE_GET_RECORD_LEN:
  return put_user(sizeof(struct mce), p);
  case MCE_GET_LOG_LEN:
  return put_user(MCE_LOG_LEN, p);
  case MCE_GETCLEAR_FLAGS: {
  unsigned flags;
- do {
+
+ do {
  flags = mcelog.flags;
  } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
  return put_user(flags, p);
  }
  default:
  return -ENOTTY;
  }
 }
 
 static const struct file_operations mce_chrdev_ops = {
@@ -678,10 +687,9 @@ void __init restart_mce(void)
  set_in_cr4(X86_CR4_MCE);
 }
 
 /*
  * Old style boot options parsing. Only for compatibility.
  */
-
 static int __init mcheck_disable(char *str)
 {
  mce_dont_init = 1;
@@ -702,16 +710,16 @@ static int __init mcheck_enable(char *str)
  else if (isdigit(str[0]))
  get_option(&str, &tolerant);
  else
  printk("mce= argument %s ignored. Please use /sys", str);
  return 1;
 }
 
 __setup("nomce", mcheck_disable);
 __setup("mce=", mcheck_enable);
 
 /*
  * Sysfs support
  */
 
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
@@ -723,12 +731,12 @@ static int mce_resume(struct sys_device *dev)
 }
 
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
  if (next_interval)
  cancel_delayed_work(&mcheck_work);
  /* Timer race is harmless here */
  on_each_cpu(mce_init, NULL, 1, 1);
  next_interval = check_interval * HZ;
  if (next_interval)
  schedule_delayed_work(&mcheck_work,
@@ -744,17 +752,17 @@ DEFINE_PER_CPU(struct sys_device, device_mce);
 
 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \
  static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
  return sprintf(buf, "%lx\n", (unsigned long)var); \
  } \
  static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
  char *end; \
  unsigned long new = simple_strtoul(buf, &end, 0); \
  if (end == buf) return -EINVAL; \
  var = new; \
  start; \
  return end-buf; \
  } \
  static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
 /* TBD should generate these dynamically based on number of available banks */
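One pattern worth noting in the reflowed mce_ioctl() above is MCE_GETCLEAR_FLAGS: the flags word is fetched and zeroed in a cmpxchg loop, so a bit set by a concurrent logger between the read and the clear is never lost. A user-space sketch of the same read-and-clear idiom with C11 atomics (the kernel uses its own cmpxchg() on mcelog.flags):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int flags_word;

    /* Atomically return the current flags and reset them to zero. */
    static unsigned int getclear_flags(void)
    {
            unsigned int flags = atomic_load(&flags_word);

            /* On failure the current value is reloaded into 'flags' and we retry. */
            while (!atomic_compare_exchange_weak(&flags_word, &flags, 0))
                    ;
            return flags;
    }

    int main(void)
    {
            atomic_fetch_or(&flags_word, 0x3);
            printf("cleared flags: 0x%x\n", getclear_flags());
            return 0;
    }

A single atomic_exchange() would do the same job in C11; the loop form mirrors the structure of the kernel code.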
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 752fb16a817d..752fb16a817d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index c17eaf5dd6dd..c17eaf5dd6dd 100644
--- a/arch/x86/kernel/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
diff --git a/arch/x86/kernel/crash_32.c b/arch/x86/kernel/crash.c
index 53589d1b1a05..af0253f94a9a 100644
--- a/arch/x86/kernel/crash_32.c
+++ b/arch/x86/kernel/crash.c
@@ -1,5 +1,5 @@
 /*
- * Architecture specific (i386) functions for kexec based crash dumps.
+ * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
  *
  * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
  *
@@ -25,8 +25,11 @@
 #include <linux/kdebug.h>
 #include <asm/smp.h>
 
+#ifdef X86_32
 #include <mach_ipi.h>
-
+#else
+#include <asm/mach_apic.h>
+#endif
 
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
@@ -38,7 +41,9 @@ static int crash_nmi_callback(struct notifier_block *self,
  unsigned long val, void *data)
 {
  struct pt_regs *regs;
+#ifdef X86_32
  struct pt_regs fixed_regs;
+#endif
  int cpu;
 
  if (val != DIE_NMI_IPI)
@@ -55,10 +60,12 @@ static int crash_nmi_callback(struct notifier_block *self,
  return NOTIFY_STOP;
  local_irq_disable();
 
+#ifdef X86_32
  if (!user_mode_vm(regs)) {
  crash_fixup_ss_esp(&fixed_regs, regs);
  regs = &fixed_regs;
  }
+#endif
  crash_save_cpu(regs, cpu);
  disable_local_APIC();
  atomic_dec(&waiting_for_crash_ipi);
diff --git a/arch/x86/kernel/crash_64.c b/arch/x86/kernel/crash_64.c
deleted file mode 100644
index 13432a1ae904..000000000000
--- a/arch/x86/kernel/crash_64.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Architecture specific (x86_64) functions for kexec based crash dumps.
- *
- * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
- *
- * Copyright (C) IBM Corporation, 2004. All rights reserved.
- *
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <linux/reboot.h>
-#include <linux/kexec.h>
-#include <linux/delay.h>
-#include <linux/elf.h>
-#include <linux/elfcore.h>
-#include <linux/kdebug.h>
-
-#include <asm/processor.h>
-#include <asm/hardirq.h>
-#include <asm/nmi.h>
-#include <asm/hw_irq.h>
-#include <asm/mach_apic.h>
-
-/* This keeps a track of which one is crashing cpu. */
-static int crashing_cpu;
-
-#ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
-
-static int crash_nmi_callback(struct notifier_block *self,
-    unsigned long val, void *data)
-{
- struct pt_regs *regs;
- int cpu;
-
- if (val != DIE_NMI_IPI)
-  return NOTIFY_OK;
-
- regs = ((struct die_args *)data)->regs;
- cpu = raw_smp_processor_id();
-
- /*
-  * Don't do anything if this handler is invoked on crashing cpu.
-  * Otherwise, system will completely hang. Crashing cpu can get
-  * an NMI if system was initially booted with nmi_watchdog parameter.
-  */
- if (cpu == crashing_cpu)
-  return NOTIFY_STOP;
- local_irq_disable();
-
- crash_save_cpu(regs, cpu);
- disable_local_APIC();
- atomic_dec(&waiting_for_crash_ipi);
- /* Assume hlt works */
- for(;;)
-  halt();
-
- return 1;
-}
-
-static void smp_send_nmi_allbutself(void)
-{
- send_IPI_allbutself(NMI_VECTOR);
-}
-
-/*
- * This code is a best effort heuristic to get the
- * other cpus to stop executing. So races with
- * cpu hotplug shouldn't matter.
- */
-
-static struct notifier_block crash_nmi_nb = {
- .notifier_call = crash_nmi_callback,
-};
-
-static void nmi_shootdown_cpus(void)
-{
- unsigned long msecs;
-
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- if (register_die_notifier(&crash_nmi_nb))
-  return;  /* return what? */
-
- /*
-  * Ensure the new callback function is set before sending
-  * out the NMI
-  */
- wmb();
-
- smp_send_nmi_allbutself();
-
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-  mdelay(1);
-  msecs--;
- }
- /* Leave the nmi callback set */
- disable_local_APIC();
-}
-#else
-static void nmi_shootdown_cpus(void)
-{
- /* There are no cpus to shootdown */
-}
-#endif
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
- /*
-  * This function is only called after the system
-  * has panicked or is otherwise in a critical state.
-  * The minimum amount of code to allow a kexec'd kernel
-  * to run successfully needs to happen here.
-  *
-  * In practice this means shooting down the other cpus in
-  * an SMP system.
-  */
- /* The kernel is broken so disable interrupts */
- local_irq_disable();
-
- /* Make a note of crashing cpu. Will be used in NMI callback.*/
- crashing_cpu = smp_processor_id();
- nmi_shootdown_cpus();
-
- if(cpu_has_apic)
-  disable_local_APIC();
-
- disable_IO_APIC();
-
- crash_save_cpu(regs, smp_processor_id());
-}
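The nmi_shootdown_cpus() logic deleted here (and kept in the shared crash.c) is a bounded, best-effort wait: send an NMI to every other CPU, then poll an atomic countdown for roughly a second instead of blocking forever on a CPU that never responds. A loose user-space sketch of that wait pattern, with threads standing in for CPUs:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NR_OTHER_CPUS 3

    static atomic_int waiting;

    /* Stand-in for the per-CPU NMI handler acknowledging the shootdown. */
    static void *fake_cpu(void *arg)
    {
            (void)arg;
            atomic_fetch_sub(&waiting, 1);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NR_OTHER_CPUS];
            int i, msecs = 1000;

            atomic_store(&waiting, NR_OTHER_CPUS);
            for (i = 0; i < NR_OTHER_CPUS; i++)
                    pthread_create(&t[i], NULL, fake_cpu, NULL);

            /* Best effort: give up after ~1 second even if someone never answers. */
            while (atomic_load(&waiting) > 0 && msecs) {
                    usleep(1000);
                    msecs--;
            }
            printf("%d responders still outstanding\n", atomic_load(&waiting));

            for (i = 0; i < NR_OTHER_CPUS; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }

(Build with -pthread; the kernel version obviously cannot join its "threads" and simply leaves stragglers behind after the timeout.)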
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 00b1c2c56454..374b7ece8961 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -124,12 +124,7 @@ ENTRY(startup_32)
  movsl
  movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
  andl %esi,%esi
- jnz 2f   # New command line protocol
- cmpw $(OLD_CL_MAGIC),OLD_CL_MAGIC_ADDR
- jne 1f
- movzwl OLD_CL_OFFSET,%esi
- addl $(OLD_CL_BASE_ADDR),%esi
-2:
+ jz 1f   # No comand line
  movl $(boot_command_line - __PAGE_OFFSET),%edi
  movl $(COMMAND_LINE_SIZE/4),%ecx
  rep
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index a4ce1911efdf..fab30e134836 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -60,7 +60,8 @@ static enum {
  NONE_FORCE_HPET_RESUME,
  OLD_ICH_FORCE_HPET_RESUME,
  ICH_FORCE_HPET_RESUME,
- VT8237_FORCE_HPET_RESUME
+ VT8237_FORCE_HPET_RESUME,
+ NVIDIA_FORCE_HPET_RESUME,
 } force_hpet_resume_type;
 
 static void __iomem *rcba_base;
@@ -321,6 +322,55 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
    vt8237_force_enable_hpet);
 
+/*
+ * Undocumented chipset feature taken from LinuxBIOS.
+ */
+static void nvidia_force_hpet_resume(void)
+{
+ pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
+}
+
+static void nvidia_force_enable_hpet(struct pci_dev *dev)
+{
+ u32 uninitialized_var(val);
+
+ if (!hpet_force_user || hpet_address || force_hpet_address)
+  return;
+
+ pci_write_config_dword(dev, 0x44, 0xfed00001);
+ pci_read_config_dword(dev, 0x44, &val);
+ force_hpet_address = val & 0xfffffffe;
+ force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+  force_hpet_address);
+ cached_dev = dev;
+ return;
+}
+
+/* ISA Bridges */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
+   nvidia_force_enable_hpet);
+
+/* LPC bridges */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
+   nvidia_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
+   nvidia_force_enable_hpet);
 
 void force_hpet_resume(void)
 {
@@ -334,6 +384,9 @@ void force_hpet_resume(void)
  case VT8237_FORCE_HPET_RESUME:
  return vt8237_force_hpet_resume();
 
+ case NVIDIA_FORCE_HPET_RESUME:
+ return nvidia_force_hpet_resume();
+
  default:
  break;
  }
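The NVIDIA entries added above follow the same recipe as the other force-HPET quirks in this file: write the fixed HPET address (with its enable bit) into the undocumented config register at offset 0x44, read it back, and mask off bit 0 to recover the MMIO base stored in force_hpet_address. The masking step in isolation, as a tiny standalone sketch (constants taken from the quirk itself):

    #include <stdio.h>

    int main(void)
    {
            /* Value the quirk programs: HPET base 0xfed00000 plus the enable bit. */
            unsigned int val = 0xfed00001;

            /* Bit 0 is an enable flag, not part of the address, so strip it. */
            unsigned long hpet_base = val & 0xfffffffe;

            printf("force_hpet_address = 0x%lx\n", hpet_base);
            return 0;
    }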
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index b7e768dd87c9..500670c93d81 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -388,7 +388,7 @@ static void inquire_remote_apic(int apicid)
 
  printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
 
- for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
  printk("... APIC #%d %s: ", apicid, names[i]);
 
  /*
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index bc9f59c246fd..db284ef44d53 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -19,12 +19,6 @@ extern const void __nosave_begin, __nosave_end;
 
 struct saved_context saved_context;
 
-unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
-unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
-unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
-unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
-unsigned long saved_context_eflags;
-
 void __save_processor_state(struct saved_context *ctxt)
 {
  kernel_fpu_begin();
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 48344b666d2c..72f952103e50 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -17,24 +17,24 @@
 #include <asm/asm-offsets.h>
 
 ENTRY(swsusp_arch_suspend)
-
- movq %rsp, saved_context_esp(%rip)
- movq %rax, saved_context_eax(%rip)
- movq %rbx, saved_context_ebx(%rip)
- movq %rcx, saved_context_ecx(%rip)
- movq %rdx, saved_context_edx(%rip)
- movq %rbp, saved_context_ebp(%rip)
- movq %rsi, saved_context_esi(%rip)
- movq %rdi, saved_context_edi(%rip)
- movq %r8, saved_context_r08(%rip)
- movq %r9, saved_context_r09(%rip)
- movq %r10, saved_context_r10(%rip)
- movq %r11, saved_context_r11(%rip)
- movq %r12, saved_context_r12(%rip)
- movq %r13, saved_context_r13(%rip)
- movq %r14, saved_context_r14(%rip)
- movq %r15, saved_context_r15(%rip)
- pushfq ; popq saved_context_eflags(%rip)
+ movq $saved_context, %rax
+ movq %rsp, pt_regs_rsp(%rax)
+ movq %rbp, pt_regs_rbp(%rax)
+ movq %rsi, pt_regs_rsi(%rax)
+ movq %rdi, pt_regs_rdi(%rax)
+ movq %rbx, pt_regs_rbx(%rax)
+ movq %rcx, pt_regs_rcx(%rax)
+ movq %rdx, pt_regs_rdx(%rax)
+ movq %r8, pt_regs_r8(%rax)
+ movq %r9, pt_regs_r9(%rax)
+ movq %r10, pt_regs_r10(%rax)
+ movq %r11, pt_regs_r11(%rax)
+ movq %r12, pt_regs_r12(%rax)
+ movq %r13, pt_regs_r13(%rax)
+ movq %r14, pt_regs_r14(%rax)
+ movq %r15, pt_regs_r15(%rax)
+ pushfq
+ popq pt_regs_eflags(%rax)
 
  /* save the address of restore_registers */
  movq $restore_registers, %rax
@@ -113,23 +113,25 @@ ENTRY(restore_registers)
  movq %rcx, %cr3
  movq %rax, %cr4;  # turn PGE back on
 
- movq saved_context_esp(%rip), %rsp
- movq saved_context_ebp(%rip), %rbp
- /* restore GPRs (we don't restore %rax, it must be 0 anyway) */
- movq saved_context_ebx(%rip), %rbx
- movq saved_context_ecx(%rip), %rcx
- movq saved_context_edx(%rip), %rdx
- movq saved_context_esi(%rip), %rsi
- movq saved_context_edi(%rip), %rdi
- movq saved_context_r08(%rip), %r8
- movq saved_context_r09(%rip), %r9
- movq saved_context_r10(%rip), %r10
- movq saved_context_r11(%rip), %r11
- movq saved_context_r12(%rip), %r12
- movq saved_context_r13(%rip), %r13
- movq saved_context_r14(%rip), %r14
- movq saved_context_r15(%rip), %r15
- pushq saved_context_eflags(%rip) ; popfq
+ /* We don't restore %rax, it must be 0 anyway */
+ movq $saved_context, %rax
+ movq pt_regs_rsp(%rax), %rsp
+ movq pt_regs_rbp(%rax), %rbp
+ movq pt_regs_rsi(%rax), %rsi
+ movq pt_regs_rdi(%rax), %rdi
+ movq pt_regs_rbx(%rax), %rbx
+ movq pt_regs_rcx(%rax), %rcx
+ movq pt_regs_rdx(%rax), %rdx
+ movq pt_regs_r8(%rax), %r8
+ movq pt_regs_r9(%rax), %r9
+ movq pt_regs_r10(%rax), %r10
+ movq pt_regs_r11(%rax), %r11
+ movq pt_regs_r12(%rax), %r12
+ movq pt_regs_r13(%rax), %r13
+ movq pt_regs_r14(%rax), %r14
+ movq pt_regs_r15(%rax), %r15
+ pushq pt_regs_eflags(%rax)
+ popfq
 
  xorq %rax, %rax
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index d78444c788a3..9ebc0dab66b4 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -131,38 +131,43 @@ unsigned long native_calculate_cpu_khz(void)
 {
  unsigned long long start, end;
  unsigned long count;
- u64 delta64;
+ u64 delta64 = (u64)ULLONG_MAX;
  int i;
  unsigned long flags;
 
  local_irq_save(flags);
 
- /* run 3 times to ensure the cache is warm */
+ /* run 3 times to ensure the cache is warm and to get an accurate reading */
  for (i = 0; i < 3; i++) {
  mach_prepare_counter();
  rdtscll(start);
  mach_countup(&count);
  rdtscll(end);
- }
- /*
-  * Error: ECTCNEVERSET
-  * The CTC wasn't reliable: we got a hit on the very first read,
-  * or the CPU was so fast/slow that the quotient wouldn't fit in
-  * 32 bits..
-  */
- if (count <= 1)
-  goto err;
 
- delta64 = end - start;
+  /*
+   * Error: ECTCNEVERSET
+   * The CTC wasn't reliable: we got a hit on the very first read,
+   * or the CPU was so fast/slow that the quotient wouldn't fit in
+   * 32 bits..
+   */
+  if (count <= 1)
+   continue;
+
+  /* cpu freq too slow: */
+  if ((end - start) <= CALIBRATE_TIME_MSEC)
+   continue;
+
+  /*
+   * We want the minimum time of all runs in case one of them
+   * is inaccurate due to SMI or other delay
+   */
+  delta64 = min(delta64, (end - start));
+ }
 
- /* cpu freq too fast: */
+ /* cpu freq too fast (or every run was bad): */
  if (delta64 > (1ULL<<32))
  goto err;
 
- /* cpu freq too slow: */
- if (delta64 <= CALIBRATE_TIME_MSEC)
-  goto err;
-
  delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
  do_div(delta64,CALIBRATE_TIME_MSEC);
 
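The reworked calibration loop above no longer trusts the last measurement alone: every run is validated (count must exceed 1 and the span must clear the too-slow floor) and the smallest surviving delta wins, since an SMI or other interruption can only inflate a run, never shrink it. A hedged user-space sketch of the take-the-minimum-of-repeated-runs idea, with clock_gettime() standing in for the PIT/TSC pair:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t best = UINT64_MAX;
            int i;

            /* Time the same work three times and keep the shortest run;
             * preemption or interrupts only ever make a run look longer. */
            for (i = 0; i < 3; i++) {
                    uint64_t start = now_ns();
                    volatile unsigned long spin;
                    uint64_t delta;

                    for (spin = 0; spin < 10000000; spin++)
                            ;
                    delta = now_ns() - start;
                    if (delta < best)
                            best = delta;
            }
            printf("best-case loop time: %llu ns\n", (unsigned long long)best);
            return 0;
    }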
diff --git a/arch/x86/oprofile/Kconfig b/arch/x86/oprofile/Kconfig
deleted file mode 100644
index d8a84088471a..000000000000
--- a/arch/x86/oprofile/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-config PROFILING
- bool "Profiling support (EXPERIMENTAL)"
- help
-   Say Y here to enable the extended profiling support mechanisms used
-   by profilers such as OProfile.
-
-
-config OPROFILE
- tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING
- help
-   OProfile is a profiling system capable of profiling the
-   whole system, include the kernel, kernel modules, libraries,
-   and applications.
-
-   If unsure, say N.
-
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index c2d24991bb2b..308970aa5382 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -833,6 +833,8 @@ source "drivers/firmware/Kconfig"
 
 source fs/Kconfig
 
+source "kernel/Kconfig.instrumentation"
+
 source "arch/x86_64/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 6d89ab762ffc..20eb69bd5a6d 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -24,6 +24,12 @@
 # Fill in SRCARCH
 SRCARCH := x86
 
+# BITS is used as extension for files which are available in a 32 bit
+# and a 64 bit version to simplify shared Makefiles.
+# e.g.: obj-y += foo_$(BITS).o
+BITS := 64
+export BITS
+
 LDFLAGS := -m elf_x86_64
 OBJCOPYFLAGS := -O binary -R .note -R .comment -S
 LDFLAGS_vmlinux :=