author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 19:38:03 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 19:38:03 -0400
commit    a7aed1c2dc4939d1d61285c738ad32700d791692 (patch)
tree      a64cda4c4dd29137a09f06a8c1d5db7cd20e7da5
parent    1212663fba7c5e003e05d24f043d5ed57eb18b24 (diff)
parent    1b82ba6e47c13ee369a4808f72d003499f8c7920 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
  x86: Add HPET force support for MCP55 (nForce 5) chipsets
  x86: Force enable HPET for CK804 (nForce 4) chipsets
  x86: clean up setup.h and the boot code
  x86: Save registers in saved_context during suspend and hibernation
  x86: merge setup_32/64.h
  x86: merge signal_32/64.h
  x86: merge required-features.h
  x86: merge sigcontext_32/64.h
  x86: merge msr_32/64.h
  x86: merge mttr_32/64.h
  x86: merge statfs_32/64.h
  x86: merge stat_32/64.h
  x86: merge shmbuf_32/64.h
  x86: merge ptrace_32/64.h
  x86: merge msgbuf_32/64.h
  x86: merge elf_32/64.h
  x86: merge byteorder_32/64.h
  x86: whitespace cleanup of mce_64.c
  x86: consolidate the cpu/ related code usage
  x86: prepare consolidation of cpu/ related code usage
  ...
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/Makefile | 6
-rw-r--r--  arch/x86/boot/boot.h | 2
-rw-r--r--  arch/x86/boot/compressed/relocs.c | 6
-rw-r--r--  arch/x86/boot/main.c | 2
-rw-r--r--  arch/x86/crypto/Makefile | 20
-rw-r--r--  arch/x86/crypto/Makefile_32 | 12
-rw-r--r--  arch/x86/crypto/Makefile_64 | 12
-rw-r--r--  arch/x86/kernel/Makefile_32 | 2
-rw-r--r--  arch/x86/kernel/Makefile_64 | 19
-rw-r--r--  arch/x86/kernel/acpi/Makefile | 10
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32 | 7
-rw-r--r--  arch/x86/kernel/acpi/Makefile_64 | 7
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S | 101
-rw-r--r--  arch/x86/kernel/apic_64.c | 14
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 28
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 24
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile | 8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c (renamed from arch/x86/kernel/cpu/mcheck/mce.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c (renamed from arch/x86/kernel/mce_64.c) | 164
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c (renamed from arch/x86/kernel/mce_amd_64.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c (renamed from arch/x86/kernel/mce_intel_64.c) | 0
-rw-r--r--  arch/x86/kernel/crash.c (renamed from arch/x86/kernel/crash_32.c) | 11
-rw-r--r--  arch/x86/kernel/crash_64.c | 135
-rw-r--r--  arch/x86/kernel/head_32.S | 7
-rw-r--r--  arch/x86/kernel/quirks.c | 55
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 2
-rw-r--r--  arch/x86/kernel/suspend_64.c | 6
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S | 72
-rw-r--r--  arch/x86/kernel/tsc_32.c | 39
-rw-r--r--  arch/x86/oprofile/Kconfig | 17
-rw-r--r--  arch/x86_64/Kconfig | 2
-rw-r--r--  arch/x86_64/Makefile | 6
-rw-r--r--  include/asm-x86/Kbuild | 27
-rw-r--r--  include/asm-x86/a.out.h | 33
-rw-r--r--  include/asm-x86/a.out_32.h | 27
-rw-r--r--  include/asm-x86/a.out_64.h | 28
-rw-r--r--  include/asm-x86/apic_64.h | 1
-rw-r--r--  include/asm-x86/bitops_32.h | 43
-rw-r--r--  include/asm-x86/bitops_64.h | 42
-rw-r--r--  include/asm-x86/byteorder.h | 81
-rw-r--r--  include/asm-x86/byteorder_32.h | 58
-rw-r--r--  include/asm-x86/byteorder_64.h | 33
-rw-r--r--  include/asm-x86/div64.h | 60
-rw-r--r--  include/asm-x86/div64_32.h | 52
-rw-r--r--  include/asm-x86/div64_64.h | 1
-rw-r--r--  include/asm-x86/elf.h | 299
-rw-r--r--  include/asm-x86/elf_32.h | 165
-rw-r--r--  include/asm-x86/elf_64.h | 180
-rw-r--r--  include/asm-x86/mmu.h | 26
-rw-r--r--  include/asm-x86/mmu_32.h | 18
-rw-r--r--  include/asm-x86/mmu_64.h | 21
-rw-r--r--  include/asm-x86/msgbuf.h | 50
-rw-r--r--  include/asm-x86/msgbuf_32.h | 31
-rw-r--r--  include/asm-x86/msgbuf_64.h | 27
-rw-r--r--  include/asm-x86/msr.h | 357
-rw-r--r--  include/asm-x86/msr_32.h | 161
-rw-r--r--  include/asm-x86/msr_64.h | 187
-rw-r--r--  include/asm-x86/mtrr.h | 175
-rw-r--r--  include/asm-x86/mtrr_32.h | 115
-rw-r--r--  include/asm-x86/mtrr_64.h | 152
-rw-r--r--  include/asm-x86/ptrace.h | 151
-rw-r--r--  include/asm-x86/ptrace_32.h | 65
-rw-r--r--  include/asm-x86/ptrace_64.h | 80
-rw-r--r--  include/asm-x86/required-features.h | 73
-rw-r--r--  include/asm-x86/required-features_32.h | 55
-rw-r--r--  include/asm-x86/required-features_64.h | 46
-rw-r--r--  include/asm-x86/setup.h | 72
-rw-r--r--  include/asm-x86/setup_32.h | 63
-rw-r--r--  include/asm-x86/setup_64.h | 19
-rw-r--r--  include/asm-x86/shmbuf.h | 62
-rw-r--r--  include/asm-x86/shmbuf_32.h | 42
-rw-r--r--  include/asm-x86/shmbuf_64.h | 38
-rw-r--r--  include/asm-x86/sigcontext.h | 149
-rw-r--r--  include/asm-x86/sigcontext_32.h | 85
-rw-r--r--  include/asm-x86/sigcontext_64.h | 55
-rw-r--r--  include/asm-x86/signal.h | 273
-rw-r--r--  include/asm-x86/signal_32.h | 232
-rw-r--r--  include/asm-x86/signal_64.h | 181
-rw-r--r--  include/asm-x86/smp_64.h | 2
-rw-r--r--  include/asm-x86/stat.h | 123
-rw-r--r--  include/asm-x86/stat_32.h | 77
-rw-r--r--  include/asm-x86/stat_64.h | 44
-rw-r--r--  include/asm-x86/statfs.h | 72
-rw-r--r--  include/asm-x86/statfs_32.h | 6
-rw-r--r--  include/asm-x86/statfs_64.h | 58
-rw-r--r--  include/asm-x86/suspend_64.h | 23
88 files changed, 2368 insertions, 3029 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b2361667839f..a13d69b2217d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -422,7 +422,8 @@ and is between 256 and 4096 characters. It is defined in the file
422 hpet= [X86-32,HPET] option to control HPET usage 422 hpet= [X86-32,HPET] option to control HPET usage
423 Format: { enable (default) | disable | force } 423 Format: { enable (default) | disable | force }
424 disable: disable HPET and use PIT instead 424 disable: disable HPET and use PIT instead
425 force: allow force enabled of undocumented chips (ICH4, VIA) 425 force: allow force enabled of undocumented chips (ICH4,
426 VIA, nVidia)
426 427
427 com20020= [HW,NET] ARCnet - COM20020 chipset 428 com20020= [HW,NET] ARCnet - COM20020 chipset
428 Format: 429 Format:
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 5bed8be34ba5..b4437ce0f973 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1270,6 +1270,8 @@ source "drivers/Kconfig"
1270 1270
1271source "fs/Kconfig" 1271source "fs/Kconfig"
1272 1272
1273source "kernel/Kconfig.instrumentation"
1274
1273source "arch/i386/Kconfig.debug" 1275source "arch/i386/Kconfig.debug"
1274 1276
1275source "security/Kconfig" 1277source "security/Kconfig"
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index b81cb64d48e5..f5b9a37def8b 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -20,6 +20,12 @@
20# Fill in SRCARCH 20# Fill in SRCARCH
21SRCARCH := x86 21SRCARCH := x86
22 22
23# BITS is used as extension for files which are available in a 32 bit
24# and a 64 bit version to simplify shared Makefiles.
25# e.g.: obj-y += foo_$(BITS).o
26BITS := 32
27export BITS
28
23HAS_BIARCH := $(call cc-option-yn, -m32) 29HAS_BIARCH := $(call cc-option-yn, -m32)
24ifeq ($(HAS_BIARCH),y) 30ifeq ($(HAS_BIARCH),y)
25AS := $(AS) --32 31AS := $(AS) --32
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 20bab9431acb..5f9a2e72a731 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/edd.h> 24#include <linux/edd.h>
25#include <asm/boot.h> 25#include <asm/boot.h>
26#include <asm/bootparam.h> 26#include <asm/setup.h>
27 27
28/* Useful macros */ 28/* Useful macros */
29#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 29#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 2d77ee728f92..7a0d00b2cf28 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -38,11 +38,9 @@ static const char* safe_abs_relocs[] = {
38 38
39static int is_safe_abs_reloc(const char* sym_name) 39static int is_safe_abs_reloc(const char* sym_name)
40{ 40{
41 int i, array_size; 41 int i;
42
43 array_size = sizeof(safe_abs_relocs)/sizeof(char*);
44 42
45 for(i = 0; i < array_size; i++) { 43 for(i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
46 if (!strcmp(sym_name, safe_abs_relocs[i])) 44 if (!strcmp(sym_name, safe_abs_relocs[i]))
47 /* Match found */ 45 /* Match found */
48 return 1; 46 return 1;
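Aside: the relocs.c change above replaces an open-coded sizeof(array)/sizeof(char*) count with the kernel's ARRAY_SIZE() macro. A minimal standalone sketch of the same idiom, with the macro spelled out and a couple of illustrative table entries (the kernel's version additionally rejects non-array arguments):

    #include <stdio.h>
    #include <string.h>

    /* Same idea as the kernel's ARRAY_SIZE(): element count of a true array. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const char *safe_abs_relocs[] = {
        "__kernel_vsyscall", "__kernel_rt_sigreturn",   /* illustrative entries */
    };

    static int is_safe_abs_reloc(const char *sym_name)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++)
            if (!strcmp(sym_name, safe_abs_relocs[i]))
                return 1;   /* match found */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", is_safe_abs_reloc("__kernel_vsyscall"));
        return 0;
    }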
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index 0eeef3989a17..1f95750ede28 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -26,8 +26,6 @@ char *heap_end = _end; /* Default end of heap = no heap */
26 * screws up the old-style command line protocol, adjust by 26 * screws up the old-style command line protocol, adjust by
27 * filling in the new-style command line pointer instead. 27 * filling in the new-style command line pointer instead.
28 */ 28 */
29#define OLD_CL_MAGIC 0xA33F
30#define OLD_CL_ADDRESS 0x20
31 29
32static void copy_boot_params(void) 30static void copy_boot_params(void)
33{ 31{
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 18dcdc6fb7aa..46bb609e2444 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -1,5 +1,15 @@
1ifeq ($(CONFIG_X86_32),y) 1#
2include ${srctree}/arch/x86/crypto/Makefile_32 2# Arch-specific CryptoAPI modules.
3else 3#
4include ${srctree}/arch/x86/crypto/Makefile_64 4
5endif 5obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
6obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
7
8obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
9obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
10
11aes-i586-y := aes-i586-asm_32.o aes_32.o
12twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
13
14aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
15twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
diff --git a/arch/x86/crypto/Makefile_32 b/arch/x86/crypto/Makefile_32
deleted file mode 100644
index 2d873a2388ed..000000000000
--- a/arch/x86/crypto/Makefile_32
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# x86/crypto/Makefile
3#
4# Arch-specific CryptoAPI modules.
5#
6
7obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
8obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
9
10aes-i586-y := aes-i586-asm_32.o aes_32.o
11twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
12
diff --git a/arch/x86/crypto/Makefile_64 b/arch/x86/crypto/Makefile_64
deleted file mode 100644
index b40896276e93..000000000000
--- a/arch/x86/crypto/Makefile_64
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# x86/crypto/Makefile
3#
4# Arch-specific CryptoAPI modules.
5#
6
7obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
8obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
9
10aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
11twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
12
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index ccea590bbb92..b9d679820306 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse_32.o
26obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o 26obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o
27obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o 27obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o
28obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 28obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
29obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash_32.o 29obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o
30obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o 30obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o
31obj-$(CONFIG_X86_NUMAQ) += numaq_32.o 31obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
32obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o 32obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index dec06e769281..466337ae9a1e 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -9,25 +9,21 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
9 x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \ 9 x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
10 setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \ 10 setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
11 pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \ 11 pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
12 perfctr-watchdog.o i8253.o 12 i8253.o
13 13
14obj-$(CONFIG_STACKTRACE) += stacktrace.o 14obj-$(CONFIG_STACKTRACE) += stacktrace.o
15obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o 15obj-y += cpu/
16obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o 16obj-y += acpi/
17obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
18obj-$(CONFIG_MTRR) += cpu/mtrr/
19obj-$(CONFIG_ACPI) += acpi/
20obj-$(CONFIG_X86_MSR) += msr.o 17obj-$(CONFIG_X86_MSR) += msr.o
21obj-$(CONFIG_MICROCODE) += microcode.o 18obj-$(CONFIG_MICROCODE) += microcode.o
22obj-$(CONFIG_X86_CPUID) += cpuid.o 19obj-$(CONFIG_X86_CPUID) += cpuid.o
23obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o 20obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o
24obj-y += apic_64.o nmi_64.o 21obj-y += apic_64.o nmi_64.o
25obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o 22obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o
26obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash_64.o 23obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash.o
27obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o 24obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o
28obj-$(CONFIG_PM) += suspend_64.o 25obj-$(CONFIG_PM) += suspend_64.o
29obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o 26obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o
30obj-$(CONFIG_CPU_FREQ) += cpu/cpufreq/
31obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 27obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
32obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o 28obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o
33obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o 29obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
@@ -42,13 +38,6 @@ obj-$(CONFIG_MODULES) += module_64.o
42obj-$(CONFIG_PCI) += early-quirks.o 38obj-$(CONFIG_PCI) += early-quirks.o
43 39
44obj-y += topology.o 40obj-y += topology.o
45obj-y += intel_cacheinfo.o
46obj-y += addon_cpuid_features.o
47obj-y += pcspeaker.o 41obj-y += pcspeaker.o
48 42
49CFLAGS_vsyscall_64.o := $(PROFILING) -g0 43CFLAGS_vsyscall_64.o := $(PROFILING) -g0
50
51therm_throt-y += cpu/mcheck/therm_throt.o
52intel_cacheinfo-y += cpu/intel_cacheinfo.o
53addon_cpuid_features-y += cpu/addon_cpuid_features.o
54perfctr-watchdog-y += cpu/perfctr-watchdog.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 3d5671939542..1351c3982ee4 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,5 +1,7 @@
1ifeq ($(CONFIG_X86_32),y) 1obj-$(CONFIG_ACPI) += boot.o
2include ${srctree}/arch/x86/kernel/acpi/Makefile_32 2obj-$(CONFIG_ACPI_SLEEP) += sleep_$(BITS).o wakeup_$(BITS).o
3else 3
4include ${srctree}/arch/x86/kernel/acpi/Makefile_64 4ifneq ($(CONFIG_ACPI_PROCESSOR),)
5obj-y += cstate.o processor.o
5endif 6endif
7
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
deleted file mode 100644
index 045dd54b33e0..000000000000
--- a/arch/x86/kernel/acpi/Makefile_32
+++ /dev/null
@@ -1,7 +0,0 @@
1obj-$(CONFIG_ACPI) += boot.o
2obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
3
4ifneq ($(CONFIG_ACPI_PROCESSOR),)
5obj-y += cstate.o processor.o
6endif
7
diff --git a/arch/x86/kernel/acpi/Makefile_64 b/arch/x86/kernel/acpi/Makefile_64
deleted file mode 100644
index 629425bc002d..000000000000
--- a/arch/x86/kernel/acpi/Makefile_64
+++ /dev/null
@@ -1,7 +0,0 @@
1obj-y := boot.o
2obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o
3
4ifneq ($(CONFIG_ACPI_PROCESSOR),)
5obj-y += processor.o cstate.o
6endif
7
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 55608ec2ed72..5ed3bc5c61d7 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -4,6 +4,7 @@
4#include <asm/pgtable.h> 4#include <asm/pgtable.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm/msr.h> 6#include <asm/msr.h>
7#include <asm/asm-offsets.h>
7 8
8# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 9# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
9# 10#
@@ -342,31 +343,32 @@ do_suspend_lowlevel:
342 xorl %eax, %eax 343 xorl %eax, %eax
343 call save_processor_state 344 call save_processor_state
344 345
345 movq %rsp, saved_context_esp(%rip) 346 movq $saved_context, %rax
346 movq %rax, saved_context_eax(%rip) 347 movq %rsp, pt_regs_rsp(%rax)
347 movq %rbx, saved_context_ebx(%rip) 348 movq %rbp, pt_regs_rbp(%rax)
348 movq %rcx, saved_context_ecx(%rip) 349 movq %rsi, pt_regs_rsi(%rax)
349 movq %rdx, saved_context_edx(%rip) 350 movq %rdi, pt_regs_rdi(%rax)
350 movq %rbp, saved_context_ebp(%rip) 351 movq %rbx, pt_regs_rbx(%rax)
351 movq %rsi, saved_context_esi(%rip) 352 movq %rcx, pt_regs_rcx(%rax)
352 movq %rdi, saved_context_edi(%rip) 353 movq %rdx, pt_regs_rdx(%rax)
353 movq %r8, saved_context_r08(%rip) 354 movq %r8, pt_regs_r8(%rax)
354 movq %r9, saved_context_r09(%rip) 355 movq %r9, pt_regs_r9(%rax)
355 movq %r10, saved_context_r10(%rip) 356 movq %r10, pt_regs_r10(%rax)
356 movq %r11, saved_context_r11(%rip) 357 movq %r11, pt_regs_r11(%rax)
357 movq %r12, saved_context_r12(%rip) 358 movq %r12, pt_regs_r12(%rax)
358 movq %r13, saved_context_r13(%rip) 359 movq %r13, pt_regs_r13(%rax)
359 movq %r14, saved_context_r14(%rip) 360 movq %r14, pt_regs_r14(%rax)
360 movq %r15, saved_context_r15(%rip) 361 movq %r15, pt_regs_r15(%rax)
361 pushfq ; popq saved_context_eflags(%rip) 362 pushfq
363 popq pt_regs_eflags(%rax)
362 364
363 movq $.L97, saved_rip(%rip) 365 movq $.L97, saved_rip(%rip)
364 366
365 movq %rsp,saved_rsp 367 movq %rsp, saved_rsp
366 movq %rbp,saved_rbp 368 movq %rbp, saved_rbp
367 movq %rbx,saved_rbx 369 movq %rbx, saved_rbx
368 movq %rdi,saved_rdi 370 movq %rdi, saved_rdi
369 movq %rsi,saved_rsi 371 movq %rsi, saved_rsi
370 372
371 addq $8, %rsp 373 addq $8, %rsp
372 movl $3, %edi 374 movl $3, %edi
@@ -377,32 +379,35 @@ do_suspend_lowlevel:
377.L99: 379.L99:
378 .align 4 380 .align 4
379 movl $24, %eax 381 movl $24, %eax
380 movw %ax, %ds 382 movw %ax, %ds
381 movq saved_context+58(%rip), %rax 383
382 movq %rax, %cr4 384 /* We don't restore %rax, it must be 0 anyway */
383 movq saved_context+50(%rip), %rax 385 movq $saved_context, %rax
384 movq %rax, %cr3 386 movq saved_context_cr4(%rax), %rbx
385 movq saved_context+42(%rip), %rax 387 movq %rbx, %cr4
386 movq %rax, %cr2 388 movq saved_context_cr3(%rax), %rbx
387 movq saved_context+34(%rip), %rax 389 movq %rbx, %cr3
388 movq %rax, %cr0 390 movq saved_context_cr2(%rax), %rbx
389 pushq saved_context_eflags(%rip) ; popfq 391 movq %rbx, %cr2
390 movq saved_context_esp(%rip), %rsp 392 movq saved_context_cr0(%rax), %rbx
391 movq saved_context_ebp(%rip), %rbp 393 movq %rbx, %cr0
392 movq saved_context_eax(%rip), %rax 394 pushq pt_regs_eflags(%rax)
393 movq saved_context_ebx(%rip), %rbx 395 popfq
394 movq saved_context_ecx(%rip), %rcx 396 movq pt_regs_rsp(%rax), %rsp
395 movq saved_context_edx(%rip), %rdx 397 movq pt_regs_rbp(%rax), %rbp
396 movq saved_context_esi(%rip), %rsi 398 movq pt_regs_rsi(%rax), %rsi
397 movq saved_context_edi(%rip), %rdi 399 movq pt_regs_rdi(%rax), %rdi
398 movq saved_context_r08(%rip), %r8 400 movq pt_regs_rbx(%rax), %rbx
399 movq saved_context_r09(%rip), %r9 401 movq pt_regs_rcx(%rax), %rcx
400 movq saved_context_r10(%rip), %r10 402 movq pt_regs_rdx(%rax), %rdx
401 movq saved_context_r11(%rip), %r11 403 movq pt_regs_r8(%rax), %r8
402 movq saved_context_r12(%rip), %r12 404 movq pt_regs_r9(%rax), %r9
403 movq saved_context_r13(%rip), %r13 405 movq pt_regs_r10(%rax), %r10
404 movq saved_context_r14(%rip), %r14 406 movq pt_regs_r11(%rax), %r11
405 movq saved_context_r15(%rip), %r15 407 movq pt_regs_r12(%rax), %r12
408 movq pt_regs_r13(%rax), %r13
409 movq pt_regs_r14(%rax), %r14
410 movq pt_regs_r15(%rax), %r15
406 411
407 xorl %eax, %eax 412 xorl %eax, %eax
408 addq $8, %rsp 413 addq $8, %rsp
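Aside: the rewritten wakeup_64.S sequence above loads $saved_context into %rax once and then addresses each slot with a generated constant such as pt_regs_rsp(%rax), instead of sixteen separate saved_context_* variables. Those constants come from asm-offsets_64.c (the next file in this diff). A hedged C sketch of the layout this addressing relies on; the member names and the assumption that the register block sits at the very start of struct saved_context are illustrative, not copied from the headers in this merge:

    /* Sketch only: if the register block is the first member, an offset
     * generated from struct pt_regs (e.g. pt_regs_rsp) can be applied
     * directly to the address of saved_context, which is what
     * "movq %rsp, pt_regs_rsp(%rax)" does with %rax = &saved_context. */
    struct pt_regs_sketch {
        unsigned long r15, r14, r13, r12, rbp, rbx, r11, r10;
        unsigned long r9, r8, rax, rcx, rdx, rsi, rdi;
        unsigned long orig_rax, rip, cs, eflags, rsp, ss;
    };

    struct saved_context_sketch {
        struct pt_regs_sketch regs;            /* assumed to sit at offset 0 */
        unsigned long cr0, cr2, cr3, cr4, cr8; /* restored via saved_context_cr* */
        /* ... segment registers, MSRs, etc. omitted ... */
    };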
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index f47bc493dba9..f28ccb588fba 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -287,6 +287,20 @@ void disable_local_APIC(void)
287 apic_write(APIC_SPIV, value); 287 apic_write(APIC_SPIV, value);
288} 288}
289 289
290void lapic_shutdown(void)
291{
292 unsigned long flags;
293
294 if (!cpu_has_apic)
295 return;
296
297 local_irq_save(flags);
298
299 disable_local_APIC();
300
301 local_irq_restore(flags);
302}
303
290/* 304/*
291 * This is to verify that we're looking at a real local APIC. 305 * This is to verify that we're looking at a real local APIC.
292 * Check these against your board if the CPUs aren't getting 306 * Check these against your board if the CPUs aren't getting
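Aside: the new lapic_shutdown() above wraps disable_local_APIC() in local_irq_save()/local_irq_restore() so the APIC is never torn down with interrupts live. A generic sketch of that guard pattern; device_present() and do_shutdown_hardware() are placeholders for cpu_has_apic and disable_local_APIC(), not kernel APIs:

    /* Generic shape of the pattern used by lapic_shutdown() above. */
    void guarded_shutdown(void)
    {
        unsigned long flags;

        if (!device_present())      /* placeholder for the cpu_has_apic check */
            return;

        local_irq_save(flags);      /* mask interrupts, remember the old state */
        do_shutdown_hardware();     /* placeholder for disable_local_APIC() */
        local_irq_restore(flags);   /* put the interrupt flag back as found */
    }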
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 778953bc636c..7e50bda565b4 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -76,6 +76,34 @@ int main(void)
76 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 76 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
77 DEFINE(pbe_next, offsetof(struct pbe, next)); 77 DEFINE(pbe_next, offsetof(struct pbe, next));
78 BLANK(); 78 BLANK();
79#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
80 ENTRY(rbx);
81 ENTRY(rbx);
82 ENTRY(rcx);
83 ENTRY(rdx);
84 ENTRY(rsp);
85 ENTRY(rbp);
86 ENTRY(rsi);
87 ENTRY(rdi);
88 ENTRY(r8);
89 ENTRY(r9);
90 ENTRY(r10);
91 ENTRY(r11);
92 ENTRY(r12);
93 ENTRY(r13);
94 ENTRY(r14);
95 ENTRY(r15);
96 ENTRY(eflags);
97 BLANK();
98#undef ENTRY
99#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
100 ENTRY(cr0);
101 ENTRY(cr2);
102 ENTRY(cr3);
103 ENTRY(cr4);
104 ENTRY(cr8);
105 BLANK();
106#undef ENTRY
79 DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); 107 DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
80 BLANK(); 108 BLANK();
81 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); 109 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
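Aside: the ENTRY()/DEFINE() additions above are what produce the pt_regs_* and saved_context_* assembler constants consumed by wakeup_64.S and suspend_asm_64.S. A small userspace sketch of the underlying offsetof() trick; the real kernel build compiles asm-offsets_64.c and scrapes the generated values into asm-offsets.h rather than printing them:

    #include <stdio.h>
    #include <stddef.h>

    struct pt_regs_sketch {             /* stand-in for the real struct pt_regs */
        unsigned long rbx, rcx, rdx, rsp, rbp;
        /* ... */
    };

    #define DEFINE(sym, val) printf("#define %s %lu\n", #sym, (unsigned long)(val))
    #define ENTRY(member)    DEFINE(pt_regs_ ## member, offsetof(struct pt_regs_sketch, member))

    int main(void)
    {
        ENTRY(rbx);     /* emits "#define pt_regs_rbx 0" and so on */
        ENTRY(rcx);
        ENTRY(rdx);
        ENTRY(rsp);
        ENTRY(rbp);
        return 0;
    }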
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 778396c78d65..cfdb2f3bd763 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -2,19 +2,19 @@
2# Makefile for x86-compatible CPU details and quirks 2# Makefile for x86-compatible CPU details and quirks
3# 3#
4 4
5obj-y := common.o proc.o bugs.o 5obj-y := intel_cacheinfo.o addon_cpuid_features.o
6 6
7obj-y += amd.o 7obj-$(CONFIG_X86_32) += common.o proc.o bugs.o
8obj-y += cyrix.o 8obj-$(CONFIG_X86_32) += amd.o
9obj-y += centaur.o 9obj-$(CONFIG_X86_32) += cyrix.o
10obj-y += transmeta.o 10obj-$(CONFIG_X86_32) += centaur.o
11obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o 11obj-$(CONFIG_X86_32) += transmeta.o
12obj-y += nexgen.o 12obj-$(CONFIG_X86_32) += intel.o
13obj-y += umc.o 13obj-$(CONFIG_X86_32) += nexgen.o
14obj-$(CONFIG_X86_32) += umc.o
14 15
15obj-$(CONFIG_X86_MCE) += mcheck/ 16obj-$(CONFIG_X86_MCE) += mcheck/
16 17obj-$(CONFIG_MTRR) += mtrr/
17obj-$(CONFIG_MTRR) += mtrr/ 18obj-$(CONFIG_CPU_FREQ) += cpufreq/
18obj-$(CONFIG_CPU_FREQ) += cpufreq/
19 19
20obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 20obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index f1ebe1c1c17a..d7d2323bbb69 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,2 +1,6 @@
1obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o 1obj-y = mce_$(BITS).o therm_throt.o
2obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o 2
3obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
4obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
5obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
6obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 34c781eddee4..34c781eddee4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 07bbfe7aa7f7..b9f802e35209 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Machine check handler. 2 * Machine check handler.
3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 * Rest from unknown author(s). 4 * Rest from unknown author(s).
5 * 2004 Andi Kleen. Rewrote most of it. 5 * 2004 Andi Kleen. Rewrote most of it.
6 */ 6 */
7 7
8#include <linux/init.h> 8#include <linux/init.h>
@@ -23,7 +23,7 @@
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/kdebug.h> 25#include <linux/kdebug.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
27#include <asm/msr.h> 27#include <asm/msr.h>
28#include <asm/mce.h> 28#include <asm/mce.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -63,10 +63,10 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
63 * separate MCEs from kernel messages to avoid bogus bug reports. 63 * separate MCEs from kernel messages to avoid bogus bug reports.
64 */ 64 */
65 65
66struct mce_log mcelog = { 66struct mce_log mcelog = {
67 MCE_LOG_SIGNATURE, 67 MCE_LOG_SIGNATURE,
68 MCE_LOG_LEN, 68 MCE_LOG_LEN,
69}; 69};
70 70
71void mce_log(struct mce *mce) 71void mce_log(struct mce *mce)
72{ 72{
@@ -111,42 +111,42 @@ static void print_mce(struct mce *m)
111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
112 m->cpu, m->mcgstatus, m->bank, m->status); 112 m->cpu, m->mcgstatus, m->bank, m->status);
113 if (m->rip) { 113 if (m->rip) {
114 printk(KERN_EMERG 114 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
115 "RIP%s %02x:<%016Lx> ",
116 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
117 m->cs, m->rip); 116 m->cs, m->rip);
118 if (m->cs == __KERNEL_CS) 117 if (m->cs == __KERNEL_CS)
119 print_symbol("{%s}", m->rip); 118 print_symbol("{%s}", m->rip);
120 printk("\n"); 119 printk("\n");
121 } 120 }
122 printk(KERN_EMERG "TSC %Lx ", m->tsc); 121 printk(KERN_EMERG "TSC %Lx ", m->tsc);
123 if (m->addr) 122 if (m->addr)
124 printk("ADDR %Lx ", m->addr); 123 printk("ADDR %Lx ", m->addr);
125 if (m->misc) 124 if (m->misc)
126 printk("MISC %Lx ", m->misc); 125 printk("MISC %Lx ", m->misc);
127 printk("\n"); 126 printk("\n");
128 printk(KERN_EMERG "This is not a software problem!\n"); 127 printk(KERN_EMERG "This is not a software problem!\n");
129 printk(KERN_EMERG 128 printk(KERN_EMERG "Run through mcelog --ascii to decode "
130 "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 129 "and contact your hardware vendor\n");
131} 130}
132 131
133static void mce_panic(char *msg, struct mce *backup, unsigned long start) 132static void mce_panic(char *msg, struct mce *backup, unsigned long start)
134{ 133{
135 int i; 134 int i;
136 135
137 oops_begin(); 136 oops_begin();
138 for (i = 0; i < MCE_LOG_LEN; i++) { 137 for (i = 0; i < MCE_LOG_LEN; i++) {
139 unsigned long tsc = mcelog.entry[i].tsc; 138 unsigned long tsc = mcelog.entry[i].tsc;
139
140 if (time_before(tsc, start)) 140 if (time_before(tsc, start))
141 continue; 141 continue;
142 print_mce(&mcelog.entry[i]); 142 print_mce(&mcelog.entry[i]);
143 if (backup && mcelog.entry[i].tsc == backup->tsc) 143 if (backup && mcelog.entry[i].tsc == backup->tsc)
144 backup = NULL; 144 backup = NULL;
145 } 145 }
146 if (backup) 146 if (backup)
147 print_mce(backup); 147 print_mce(backup);
148 panic(msg); 148 panic(msg);
149} 149}
150 150
151static int mce_available(struct cpuinfo_x86 *c) 151static int mce_available(struct cpuinfo_x86 *c)
152{ 152{
@@ -170,10 +170,9 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
170 } 170 }
171} 171}
172 172
173/* 173/*
174 * The actual machine check handler 174 * The actual machine check handler
175 */ 175 */
176
177void do_machine_check(struct pt_regs * regs, long error_code) 176void do_machine_check(struct pt_regs * regs, long error_code)
178{ 177{
179 struct mce m, panicm; 178 struct mce m, panicm;
@@ -194,7 +193,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
194 atomic_inc(&mce_entry); 193 atomic_inc(&mce_entry);
195 194
196 if (regs) 195 if (regs)
197 notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); 196 notify_die(DIE_NMI, "machine check", regs, error_code, 18,
197 SIGKILL);
198 if (!banks) 198 if (!banks)
199 goto out2; 199 goto out2;
200 200
@@ -204,15 +204,15 @@ void do_machine_check(struct pt_regs * regs, long error_code)
204 /* if the restart IP is not valid, we're done for */ 204 /* if the restart IP is not valid, we're done for */
205 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 205 if (!(m.mcgstatus & MCG_STATUS_RIPV))
206 no_way_out = 1; 206 no_way_out = 1;
207 207
208 rdtscll(mcestart); 208 rdtscll(mcestart);
209 barrier(); 209 barrier();
210 210
211 for (i = 0; i < banks; i++) { 211 for (i = 0; i < banks; i++) {
212 if (!bank[i]) 212 if (!bank[i])
213 continue; 213 continue;
214 214
215 m.misc = 0; 215 m.misc = 0;
216 m.addr = 0; 216 m.addr = 0;
217 m.bank = i; 217 m.bank = i;
218 m.tsc = 0; 218 m.tsc = 0;
@@ -372,7 +372,7 @@ static void mcheck_timer(struct work_struct *work)
372 if (mce_notify_user()) { 372 if (mce_notify_user()) {
373 next_interval = max(next_interval/2, HZ/100); 373 next_interval = max(next_interval/2, HZ/100);
374 } else { 374 } else {
375 next_interval = min(next_interval*2, 375 next_interval = min(next_interval * 2,
376 (int)round_jiffies_relative(check_interval*HZ)); 376 (int)round_jiffies_relative(check_interval*HZ));
377 } 377 }
378 378
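Aside: the mcheck_timer() hunk above only re-wraps a long line, but the surrounding logic is a small adaptive poll: when mce_notify_user() reports pending events the next interval is halved (bounded below by HZ/100), otherwise it is doubled back toward check_interval*HZ. A standalone sketch of that backoff policy; HZ_TICKS and MAX_INTERVAL are illustrative constants, not kernel values:

    /* Adaptive polling interval in "ticks", mirroring the logic above. */
    enum { HZ_TICKS = 250, MAX_INTERVAL = 5 * 60 * HZ_TICKS };

    static int next_interval = MAX_INTERVAL;

    static int update_interval(int events_pending)
    {
        if (events_pending) {
            /* errors seen recently: poll faster, but at most 100 times/sec */
            next_interval /= 2;
            if (next_interval < HZ_TICKS / 100)
                next_interval = HZ_TICKS / 100;
        } else {
            /* quiet: back off exponentially toward the configured maximum */
            next_interval *= 2;
            if (next_interval > MAX_INTERVAL)
                next_interval = MAX_INTERVAL;
        }
        return next_interval;
    }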
@@ -423,18 +423,18 @@ static struct notifier_block mce_idle_notifier = {
423}; 423};
424 424
425static __init int periodic_mcheck_init(void) 425static __init int periodic_mcheck_init(void)
426{ 426{
427 next_interval = check_interval * HZ; 427 next_interval = check_interval * HZ;
428 if (next_interval) 428 if (next_interval)
429 schedule_delayed_work(&mcheck_work, 429 schedule_delayed_work(&mcheck_work,
430 round_jiffies_relative(next_interval)); 430 round_jiffies_relative(next_interval));
431 idle_notifier_register(&mce_idle_notifier); 431 idle_notifier_register(&mce_idle_notifier);
432 return 0; 432 return 0;
433} 433}
434__initcall(periodic_mcheck_init); 434__initcall(periodic_mcheck_init);
435 435
436 436
437/* 437/*
438 * Initialize Machine Checks for a CPU. 438 * Initialize Machine Checks for a CPU.
439 */ 439 */
440static void mce_init(void *dummy) 440static void mce_init(void *dummy)
@@ -444,9 +444,9 @@ static void mce_init(void *dummy)
444 444
445 rdmsrl(MSR_IA32_MCG_CAP, cap); 445 rdmsrl(MSR_IA32_MCG_CAP, cap);
446 banks = cap & 0xff; 446 banks = cap & 0xff;
447 if (banks > NR_BANKS) { 447 if (banks > NR_BANKS) {
448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); 448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
449 banks = NR_BANKS; 449 banks = NR_BANKS;
450 } 450 }
451 /* Use accurate RIP reporting if available. */ 451 /* Use accurate RIP reporting if available. */
452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) 452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
@@ -464,15 +464,15 @@ static void mce_init(void *dummy)
464 for (i = 0; i < banks; i++) { 464 for (i = 0; i < banks; i++) {
465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
467 } 467 }
468} 468}
469 469
470/* Add per CPU specific workarounds here */ 470/* Add per CPU specific workarounds here */
471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 471static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
472{ 472{
473 /* This should be disabled by the BIOS, but isn't always */ 473 /* This should be disabled by the BIOS, but isn't always */
474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
475 /* disable GART TBL walk error reporting, which trips off 475 /* disable GART TBL walk error reporting, which trips off
476 incorrectly with the IOMMU & 3ware & Cerberus. */ 476 incorrectly with the IOMMU & 3ware & Cerberus. */
477 clear_bit(10, &bank[4]); 477 clear_bit(10, &bank[4]);
478 /* Lots of broken BIOS around that don't clear them 478 /* Lots of broken BIOS around that don't clear them
@@ -480,7 +480,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
480 mce_bootlog = 0; 480 mce_bootlog = 0;
481 } 481 }
482 482
483} 483}
484 484
485static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 485static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
486{ 486{
@@ -496,15 +496,15 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
496 } 496 }
497} 497}
498 498
499/* 499/*
500 * Called for each booted CPU to set up machine checks. 500 * Called for each booted CPU to set up machine checks.
501 * Must be called with preempt off. 501 * Must be called with preempt off.
502 */ 502 */
503void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 503void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
504{ 504{
505 static cpumask_t mce_cpus = CPU_MASK_NONE; 505 static cpumask_t mce_cpus = CPU_MASK_NONE;
506 506
507 mce_cpu_quirks(c); 507 mce_cpu_quirks(c);
508 508
509 if (mce_dont_init || 509 if (mce_dont_init ||
510 cpu_test_and_set(smp_processor_id(), mce_cpus) || 510 cpu_test_and_set(smp_processor_id(), mce_cpus) ||
@@ -553,13 +553,15 @@ static int mce_release(struct inode *inode, struct file *file)
553 return 0; 553 return 0;
554} 554}
555 555
556static void collect_tscs(void *data) 556static void collect_tscs(void *data)
557{ 557{
558 unsigned long *cpu_tsc = (unsigned long *)data; 558 unsigned long *cpu_tsc = (unsigned long *)data;
559
559 rdtscll(cpu_tsc[smp_processor_id()]); 560 rdtscll(cpu_tsc[smp_processor_id()]);
560} 561}
561 562
562static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) 563static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
564 loff_t *off)
563{ 565{
564 unsigned long *cpu_tsc; 566 unsigned long *cpu_tsc;
565 static DECLARE_MUTEX(mce_read_sem); 567 static DECLARE_MUTEX(mce_read_sem);
@@ -571,19 +573,20 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
571 if (!cpu_tsc) 573 if (!cpu_tsc)
572 return -ENOMEM; 574 return -ENOMEM;
573 575
574 down(&mce_read_sem); 576 down(&mce_read_sem);
575 next = rcu_dereference(mcelog.next); 577 next = rcu_dereference(mcelog.next);
576 578
577 /* Only supports full reads right now */ 579 /* Only supports full reads right now */
578 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 580 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
579 up(&mce_read_sem); 581 up(&mce_read_sem);
580 kfree(cpu_tsc); 582 kfree(cpu_tsc);
581 return -EINVAL; 583 return -EINVAL;
582 } 584 }
583 585
584 err = 0; 586 err = 0;
585 for (i = 0; i < next; i++) { 587 for (i = 0; i < next; i++) {
586 unsigned long start = jiffies; 588 unsigned long start = jiffies;
589
587 while (!mcelog.entry[i].finished) { 590 while (!mcelog.entry[i].finished) {
588 if (time_after_eq(jiffies, start + 2)) { 591 if (time_after_eq(jiffies, start + 2)) {
589 memset(mcelog.entry + i,0, sizeof(struct mce)); 592 memset(mcelog.entry + i,0, sizeof(struct mce));
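Aside: the loop above polls mcelog.entry[i].finished but bails out once time_after_eq(jiffies, start + 2) fires, so a writer that died mid-record cannot hang the reader; the jiffies helpers also stay correct across counter wrap-around. A userspace sketch of the same bounded-wait shape (the 2 ms budget and helper names are illustrative):

    #include <time.h>
    #include <errno.h>

    /* Poll a flag, but give up after a fixed time budget.  The kernel code
     * above uses jiffies plus time_after_eq(), which stays correct when the
     * jiffies counter wraps; a plain ">=" on raw timestamps would not. */
    static long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    static int wait_for_flag(volatile int *flag, long budget_ms)
    {
        long start = now_ms();

        while (!*flag) {
            if (now_ms() - start >= budget_ms)
                return -ETIMEDOUT;   /* writer never finished the record */
        }
        return 0;
    }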
@@ -593,31 +596,34 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
593 } 596 }
594 smp_rmb(); 597 smp_rmb();
595 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); 598 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
596 buf += sizeof(struct mce); 599 buf += sizeof(struct mce);
597 timeout: 600 timeout:
598 ; 601 ;
599 } 602 }
600 603
601 memset(mcelog.entry, 0, next * sizeof(struct mce)); 604 memset(mcelog.entry, 0, next * sizeof(struct mce));
602 mcelog.next = 0; 605 mcelog.next = 0;
603 606
604 synchronize_sched(); 607 synchronize_sched();
605 608
606 /* Collect entries that were still getting written before the synchronize. */ 609 /*
607 610 * Collect entries that were still getting written before the
611 * synchronize.
612 */
608 on_each_cpu(collect_tscs, cpu_tsc, 1, 1); 613 on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
609 for (i = next; i < MCE_LOG_LEN; i++) { 614 for (i = next; i < MCE_LOG_LEN; i++) {
610 if (mcelog.entry[i].finished && 615 if (mcelog.entry[i].finished &&
611 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 616 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
612 err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); 617 err |= copy_to_user(buf, mcelog.entry+i,
618 sizeof(struct mce));
613 smp_rmb(); 619 smp_rmb();
614 buf += sizeof(struct mce); 620 buf += sizeof(struct mce);
615 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 621 memset(&mcelog.entry[i], 0, sizeof(struct mce));
616 } 622 }
617 } 623 }
618 up(&mce_read_sem); 624 up(&mce_read_sem);
619 kfree(cpu_tsc); 625 kfree(cpu_tsc);
620 return err ? -EFAULT : buf - ubuf; 626 return err ? -EFAULT : buf - ubuf;
621} 627}
622 628
623static unsigned int mce_poll(struct file *file, poll_table *wait) 629static unsigned int mce_poll(struct file *file, poll_table *wait)
@@ -628,26 +634,29 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
628 return 0; 634 return 0;
629} 635}
630 636
631static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 637static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd,
638 unsigned long arg)
632{ 639{
633 int __user *p = (int __user *)arg; 640 int __user *p = (int __user *)arg;
641
634 if (!capable(CAP_SYS_ADMIN)) 642 if (!capable(CAP_SYS_ADMIN))
635 return -EPERM; 643 return -EPERM;
636 switch (cmd) { 644 switch (cmd) {
637 case MCE_GET_RECORD_LEN: 645 case MCE_GET_RECORD_LEN:
638 return put_user(sizeof(struct mce), p); 646 return put_user(sizeof(struct mce), p);
639 case MCE_GET_LOG_LEN: 647 case MCE_GET_LOG_LEN:
640 return put_user(MCE_LOG_LEN, p); 648 return put_user(MCE_LOG_LEN, p);
641 case MCE_GETCLEAR_FLAGS: { 649 case MCE_GETCLEAR_FLAGS: {
642 unsigned flags; 650 unsigned flags;
643 do { 651
652 do {
644 flags = mcelog.flags; 653 flags = mcelog.flags;
645 } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 654 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
646 return put_user(flags, p); 655 return put_user(flags, p);
647 } 656 }
648 default: 657 default:
649 return -ENOTTY; 658 return -ENOTTY;
650 } 659 }
651} 660}
652 661
653static const struct file_operations mce_chrdev_ops = { 662static const struct file_operations mce_chrdev_ops = {
@@ -678,10 +687,9 @@ void __init restart_mce(void)
678 set_in_cr4(X86_CR4_MCE); 687 set_in_cr4(X86_CR4_MCE);
679} 688}
680 689
681/* 690/*
682 * Old style boot options parsing. Only for compatibility. 691 * Old style boot options parsing. Only for compatibility.
683 */ 692 */
684
685static int __init mcheck_disable(char *str) 693static int __init mcheck_disable(char *str)
686{ 694{
687 mce_dont_init = 1; 695 mce_dont_init = 1;
@@ -702,16 +710,16 @@ static int __init mcheck_enable(char *str)
702 else if (isdigit(str[0])) 710 else if (isdigit(str[0]))
703 get_option(&str, &tolerant); 711 get_option(&str, &tolerant);
704 else 712 else
705 printk("mce= argument %s ignored. Please use /sys", str); 713 printk("mce= argument %s ignored. Please use /sys", str);
706 return 1; 714 return 1;
707} 715}
708 716
709__setup("nomce", mcheck_disable); 717__setup("nomce", mcheck_disable);
710__setup("mce=", mcheck_enable); 718__setup("mce=", mcheck_enable);
711 719
712/* 720/*
713 * Sysfs support 721 * Sysfs support
714 */ 722 */
715 723
716/* On resume clear all MCE state. Don't want to see leftovers from the BIOS. 724/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
717 Only one CPU is active at this time, the others get readded later using 725 Only one CPU is active at this time, the others get readded later using
@@ -723,12 +731,12 @@ static int mce_resume(struct sys_device *dev)
723} 731}
724 732
725/* Reinit MCEs after user configuration changes */ 733/* Reinit MCEs after user configuration changes */
726static void mce_restart(void) 734static void mce_restart(void)
727{ 735{
728 if (next_interval) 736 if (next_interval)
729 cancel_delayed_work(&mcheck_work); 737 cancel_delayed_work(&mcheck_work);
730 /* Timer race is harmless here */ 738 /* Timer race is harmless here */
731 on_each_cpu(mce_init, NULL, 1, 1); 739 on_each_cpu(mce_init, NULL, 1, 1);
732 next_interval = check_interval * HZ; 740 next_interval = check_interval * HZ;
733 if (next_interval) 741 if (next_interval)
734 schedule_delayed_work(&mcheck_work, 742 schedule_delayed_work(&mcheck_work,
@@ -744,17 +752,17 @@ DEFINE_PER_CPU(struct sys_device, device_mce);
744 752
745/* Why are there no generic functions for this? */ 753/* Why are there no generic functions for this? */
746#define ACCESSOR(name, var, start) \ 754#define ACCESSOR(name, var, start) \
747 static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 755 static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
748 return sprintf(buf, "%lx\n", (unsigned long)var); \ 756 return sprintf(buf, "%lx\n", (unsigned long)var); \
749 } \ 757 } \
750 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ 758 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
751 char *end; \ 759 char *end; \
752 unsigned long new = simple_strtoul(buf, &end, 0); \ 760 unsigned long new = simple_strtoul(buf, &end, 0); \
753 if (end == buf) return -EINVAL; \ 761 if (end == buf) return -EINVAL; \
754 var = new; \ 762 var = new; \
755 start; \ 763 start; \
756 return end-buf; \ 764 return end-buf; \
757 } \ 765 } \
758 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 766 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
759 767
760/* TBD should generate these dynamically based on number of available banks */ 768/* TBD should generate these dynamically based on number of available banks */
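Aside: the ACCESSOR() macro reformatted above token-pastes a show_<name>()/set_<name>() pair plus the sysfs attribute for each tunable. A standalone sketch of that generate-accessors-by-macro technique; the sysfs plumbing is dropped and the buffer-based signatures are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Generate a show_<name>() / set_<name>() pair around a variable,
     * in the spirit of the ACCESSOR() macro above (sysfs glue omitted). */
    #define ACCESSOR(name, var)                                               \
        static int show_ ## name(char *buf, size_t len)                       \
        {                                                                     \
            return snprintf(buf, len, "%lx\n", (unsigned long)(var));         \
        }                                                                     \
        static int set_ ## name(const char *buf)                              \
        {                                                                     \
            char *end;                                                        \
            unsigned long new = strtoul(buf, &end, 0);                        \
            if (end == buf)                                                   \
                return -1;                                                    \
            var = new;                                                        \
            return 0;                                                         \
        }

    static unsigned long check_interval = 300;

    ACCESSOR(check_interval, check_interval)

    int main(void)
    {
        char buf[32];

        set_check_interval("0x12c");
        show_check_interval(buf, sizeof(buf));
        printf("%s", buf);
        return 0;
    }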
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 752fb16a817d..752fb16a817d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index c17eaf5dd6dd..c17eaf5dd6dd 100644
--- a/arch/x86/kernel/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
diff --git a/arch/x86/kernel/crash_32.c b/arch/x86/kernel/crash.c
index 53589d1b1a05..af0253f94a9a 100644
--- a/arch/x86/kernel/crash_32.c
+++ b/arch/x86/kernel/crash.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Architecture specific (i386) functions for kexec based crash dumps. 2 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
3 * 3 *
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) 4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * 5 *
@@ -25,8 +25,11 @@
25#include <linux/kdebug.h> 25#include <linux/kdebug.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27 27
28#ifdef X86_32
28#include <mach_ipi.h> 29#include <mach_ipi.h>
29 30#else
31#include <asm/mach_apic.h>
32#endif
30 33
31/* This keeps a track of which one is crashing cpu. */ 34/* This keeps a track of which one is crashing cpu. */
32static int crashing_cpu; 35static int crashing_cpu;
@@ -38,7 +41,9 @@ static int crash_nmi_callback(struct notifier_block *self,
38 unsigned long val, void *data) 41 unsigned long val, void *data)
39{ 42{
40 struct pt_regs *regs; 43 struct pt_regs *regs;
44#ifdef X86_32
41 struct pt_regs fixed_regs; 45 struct pt_regs fixed_regs;
46#endif
42 int cpu; 47 int cpu;
43 48
44 if (val != DIE_NMI_IPI) 49 if (val != DIE_NMI_IPI)
@@ -55,10 +60,12 @@ static int crash_nmi_callback(struct notifier_block *self,
55 return NOTIFY_STOP; 60 return NOTIFY_STOP;
56 local_irq_disable(); 61 local_irq_disable();
57 62
63#ifdef X86_32
58 if (!user_mode_vm(regs)) { 64 if (!user_mode_vm(regs)) {
59 crash_fixup_ss_esp(&fixed_regs, regs); 65 crash_fixup_ss_esp(&fixed_regs, regs);
60 regs = &fixed_regs; 66 regs = &fixed_regs;
61 } 67 }
68#endif
62 crash_save_cpu(regs, cpu); 69 crash_save_cpu(regs, cpu);
63 disable_local_APIC(); 70 disable_local_APIC();
64 atomic_dec(&waiting_for_crash_ipi); 71 atomic_dec(&waiting_for_crash_ipi);
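Aside: crash_32.c becomes the shared crash.c above by guarding the few 32-bit-only pieces (the mach_ipi.h include and the crash_fixup_ss_esp() call) with preprocessor conditionals; note that the hunks test #ifdef X86_32, whereas Kconfig-derived symbols are normally spelled CONFIG_X86_32. A minimal sketch of the unification pattern itself, with placeholder function names:

    /* One source file shared by both word sizes; only the genuinely
     * architecture-specific step is guarded. */
    #ifdef CONFIG_X86_32
    static void fixup_regs_32(void)
    {
        /* 32-bit only: rebuild ss/esp the way crash_fixup_ss_esp() does */
    }
    #endif

    static void save_crash_state(void)
    {
    #ifdef CONFIG_X86_32
        fixup_regs_32();
    #endif
        /* common path: save registers, disable the local APIC, ... */
    }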
diff --git a/arch/x86/kernel/crash_64.c b/arch/x86/kernel/crash_64.c
deleted file mode 100644
index 13432a1ae904..000000000000
--- a/arch/x86/kernel/crash_64.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Architecture specific (x86_64) functions for kexec based crash dumps.
3 *
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 *
6 * Copyright (C) IBM Corporation, 2004. All rights reserved.
7 *
8 */
9
10#include <linux/init.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/smp.h>
14#include <linux/irq.h>
15#include <linux/reboot.h>
16#include <linux/kexec.h>
17#include <linux/delay.h>
18#include <linux/elf.h>
19#include <linux/elfcore.h>
20#include <linux/kdebug.h>
21
22#include <asm/processor.h>
23#include <asm/hardirq.h>
24#include <asm/nmi.h>
25#include <asm/hw_irq.h>
26#include <asm/mach_apic.h>
27
28/* This keeps a track of which one is crashing cpu. */
29static int crashing_cpu;
30
31#ifdef CONFIG_SMP
32static atomic_t waiting_for_crash_ipi;
33
34static int crash_nmi_callback(struct notifier_block *self,
35 unsigned long val, void *data)
36{
37 struct pt_regs *regs;
38 int cpu;
39
40 if (val != DIE_NMI_IPI)
41 return NOTIFY_OK;
42
43 regs = ((struct die_args *)data)->regs;
44 cpu = raw_smp_processor_id();
45
46 /*
47 * Don't do anything if this handler is invoked on crashing cpu.
48 * Otherwise, system will completely hang. Crashing cpu can get
49 * an NMI if system was initially booted with nmi_watchdog parameter.
50 */
51 if (cpu == crashing_cpu)
52 return NOTIFY_STOP;
53 local_irq_disable();
54
55 crash_save_cpu(regs, cpu);
56 disable_local_APIC();
57 atomic_dec(&waiting_for_crash_ipi);
58 /* Assume hlt works */
59 for(;;)
60 halt();
61
62 return 1;
63}
64
65static void smp_send_nmi_allbutself(void)
66{
67 send_IPI_allbutself(NMI_VECTOR);
68}
69
70/*
71 * This code is a best effort heuristic to get the
72 * other cpus to stop executing. So races with
73 * cpu hotplug shouldn't matter.
74 */
75
76static struct notifier_block crash_nmi_nb = {
77 .notifier_call = crash_nmi_callback,
78};
79
80static void nmi_shootdown_cpus(void)
81{
82 unsigned long msecs;
83
84 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
85 if (register_die_notifier(&crash_nmi_nb))
86 return; /* return what? */
87
88 /*
89 * Ensure the new callback function is set before sending
90 * out the NMI
91 */
92 wmb();
93
94 smp_send_nmi_allbutself();
95
96 msecs = 1000; /* Wait at most a second for the other cpus to stop */
97 while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
98 mdelay(1);
99 msecs--;
100 }
101 /* Leave the nmi callback set */
102 disable_local_APIC();
103}
104#else
105static void nmi_shootdown_cpus(void)
106{
107 /* There are no cpus to shootdown */
108}
109#endif
110
111void machine_crash_shutdown(struct pt_regs *regs)
112{
113 /*
114 * This function is only called after the system
115 * has panicked or is otherwise in a critical state.
116 * The minimum amount of code to allow a kexec'd kernel
117 * to run successfully needs to happen here.
118 *
119 * In practice this means shooting down the other cpus in
120 * an SMP system.
121 */
122 /* The kernel is broken so disable interrupts */
123 local_irq_disable();
124
125 /* Make a note of crashing cpu. Will be used in NMI callback.*/
126 crashing_cpu = smp_processor_id();
127 nmi_shootdown_cpus();
128
129 if(cpu_has_apic)
130 disable_local_APIC();
131
132 disable_IO_APIC();
133
134 crash_save_cpu(regs, smp_processor_id());
135}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 00b1c2c56454..374b7ece8961 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -124,12 +124,7 @@ ENTRY(startup_32)
124 movsl 124 movsl
125 movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi 125 movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
126 andl %esi,%esi 126 andl %esi,%esi
127 jnz 2f # New command line protocol 127 jz 1f # No comand line
128 cmpw $(OLD_CL_MAGIC),OLD_CL_MAGIC_ADDR
129 jne 1f
130 movzwl OLD_CL_OFFSET,%esi
131 addl $(OLD_CL_BASE_ADDR),%esi
1322:
133 movl $(boot_command_line - __PAGE_OFFSET),%edi 128 movl $(boot_command_line - __PAGE_OFFSET),%edi
134 movl $(COMMAND_LINE_SIZE/4),%ecx 129 movl $(COMMAND_LINE_SIZE/4),%ecx
135 rep 130 rep
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index a4ce1911efdf..fab30e134836 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -60,7 +60,8 @@ static enum {
60 NONE_FORCE_HPET_RESUME, 60 NONE_FORCE_HPET_RESUME,
61 OLD_ICH_FORCE_HPET_RESUME, 61 OLD_ICH_FORCE_HPET_RESUME,
62 ICH_FORCE_HPET_RESUME, 62 ICH_FORCE_HPET_RESUME,
63 VT8237_FORCE_HPET_RESUME 63 VT8237_FORCE_HPET_RESUME,
64 NVIDIA_FORCE_HPET_RESUME,
64} force_hpet_resume_type; 65} force_hpet_resume_type;
65 66
66static void __iomem *rcba_base; 67static void __iomem *rcba_base;
@@ -321,6 +322,55 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
321DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, 322DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
322 vt8237_force_enable_hpet); 323 vt8237_force_enable_hpet);
323 324
325/*
326 * Undocumented chipset feature taken from LinuxBIOS.
327 */
328static void nvidia_force_hpet_resume(void)
329{
330 pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
331 printk(KERN_DEBUG "Force enabled HPET at resume\n");
332}
333
334static void nvidia_force_enable_hpet(struct pci_dev *dev)
335{
336 u32 uninitialized_var(val);
337
338 if (!hpet_force_user || hpet_address || force_hpet_address)
339 return;
340
341 pci_write_config_dword(dev, 0x44, 0xfed00001);
342 pci_read_config_dword(dev, 0x44, &val);
343 force_hpet_address = val & 0xfffffffe;
344 force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
345 printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
346 force_hpet_address);
347 cached_dev = dev;
348 return;
349}
350
351/* ISA Bridges */
352DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
353 nvidia_force_enable_hpet);
354DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
355 nvidia_force_enable_hpet);
356
357/* LPC bridges */
358DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
359 nvidia_force_enable_hpet);
360DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
361 nvidia_force_enable_hpet);
362DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
363 nvidia_force_enable_hpet);
364DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
365 nvidia_force_enable_hpet);
366DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
367 nvidia_force_enable_hpet);
368DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
369 nvidia_force_enable_hpet);
370DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
371 nvidia_force_enable_hpet);
372DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
373 nvidia_force_enable_hpet);
324 374
325void force_hpet_resume(void) 375void force_hpet_resume(void)
326{ 376{
@@ -334,6 +384,9 @@ void force_hpet_resume(void)
334 case VT8237_FORCE_HPET_RESUME: 384 case VT8237_FORCE_HPET_RESUME:
335 return vt8237_force_hpet_resume(); 385 return vt8237_force_hpet_resume();
336 386
387 case NVIDIA_FORCE_HPET_RESUME:
388 return nvidia_force_hpet_resume();
389
337 default: 390 default:
338 break; 391 break;
339 } 392 }
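Aside: the NVIDIA quirk added above follows the same recipe as the existing ICH/VT8237 ones: if hpet=force was requested and no HPET was advertised, write a known value into the undocumented config register 0x44, read it back, mask off the enable bit to recover the base address, and cache the device so the write can be replayed on resume. A sketch of just the read-back-and-mask step; the register offset and values are taken from the hunk, and error handling is omitted as in the original:

    /* Sketch of the force-enable step used by nvidia_force_enable_hpet() above. */
    static unsigned long probe_forced_hpet(struct pci_dev *dev)
    {
        u32 val;

        /* program the undocumented register: desired base | enable bit */
        pci_write_config_dword(dev, 0x44, 0xfed00001);

        /* read it back; bit 0 is the enable bit, the rest is the base address */
        pci_read_config_dword(dev, 0x44, &val);
        return val & 0xfffffffe;
    }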
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index b7e768dd87c9..500670c93d81 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -388,7 +388,7 @@ static void inquire_remote_apic(int apicid)
388 388
389 printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); 389 printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
390 390
391 for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) { 391 for (i = 0; i < ARRAY_SIZE(regs); i++) {
392 printk("... APIC #%d %s: ", apicid, names[i]); 392 printk("... APIC #%d %s: ", apicid, names[i]);
393 393
394 /* 394 /*
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index bc9f59c246fd..db284ef44d53 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -19,12 +19,6 @@ extern const void __nosave_begin, __nosave_end;
19 19
20struct saved_context saved_context; 20struct saved_context saved_context;
21 21
22unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
23unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
24unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
25unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
26unsigned long saved_context_eflags;
27
28void __save_processor_state(struct saved_context *ctxt) 22void __save_processor_state(struct saved_context *ctxt)
29{ 23{
30 kernel_fpu_begin(); 24 kernel_fpu_begin();
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 48344b666d2c..72f952103e50 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -17,24 +17,24 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18 18
19ENTRY(swsusp_arch_suspend) 19ENTRY(swsusp_arch_suspend)
20 20 movq $saved_context, %rax
21 movq %rsp, saved_context_esp(%rip) 21 movq %rsp, pt_regs_rsp(%rax)
22 movq %rax, saved_context_eax(%rip) 22 movq %rbp, pt_regs_rbp(%rax)
23 movq %rbx, saved_context_ebx(%rip) 23 movq %rsi, pt_regs_rsi(%rax)
24 movq %rcx, saved_context_ecx(%rip) 24 movq %rdi, pt_regs_rdi(%rax)
25 movq %rdx, saved_context_edx(%rip) 25 movq %rbx, pt_regs_rbx(%rax)
26 movq %rbp, saved_context_ebp(%rip) 26 movq %rcx, pt_regs_rcx(%rax)
27 movq %rsi, saved_context_esi(%rip) 27 movq %rdx, pt_regs_rdx(%rax)
28 movq %rdi, saved_context_edi(%rip) 28 movq %r8, pt_regs_r8(%rax)
29 movq %r8, saved_context_r08(%rip) 29 movq %r9, pt_regs_r9(%rax)
30 movq %r9, saved_context_r09(%rip) 30 movq %r10, pt_regs_r10(%rax)
31 movq %r10, saved_context_r10(%rip) 31 movq %r11, pt_regs_r11(%rax)
32 movq %r11, saved_context_r11(%rip) 32 movq %r12, pt_regs_r12(%rax)
33 movq %r12, saved_context_r12(%rip) 33 movq %r13, pt_regs_r13(%rax)
34 movq %r13, saved_context_r13(%rip) 34 movq %r14, pt_regs_r14(%rax)
35 movq %r14, saved_context_r14(%rip) 35 movq %r15, pt_regs_r15(%rax)
36 movq %r15, saved_context_r15(%rip) 36 pushfq
37 pushfq ; popq saved_context_eflags(%rip) 37 popq pt_regs_eflags(%rax)
38 38
39 /* save the address of restore_registers */ 39 /* save the address of restore_registers */
40 movq $restore_registers, %rax 40 movq $restore_registers, %rax
@@ -113,23 +113,25 @@ ENTRY(restore_registers)
113 movq %rcx, %cr3 113 movq %rcx, %cr3
114 movq %rax, %cr4; # turn PGE back on 114 movq %rax, %cr4; # turn PGE back on
115 115
116 movq saved_context_esp(%rip), %rsp 116 /* We don't restore %rax, it must be 0 anyway */
117 movq saved_context_ebp(%rip), %rbp 117 movq $saved_context, %rax
118 /* restore GPRs (we don't restore %rax, it must be 0 anyway) */ 118 movq pt_regs_rsp(%rax), %rsp
119 movq saved_context_ebx(%rip), %rbx 119 movq pt_regs_rbp(%rax), %rbp
120 movq saved_context_ecx(%rip), %rcx 120 movq pt_regs_rsi(%rax), %rsi
121 movq saved_context_edx(%rip), %rdx 121 movq pt_regs_rdi(%rax), %rdi
122 movq saved_context_esi(%rip), %rsi 122 movq pt_regs_rbx(%rax), %rbx
123 movq saved_context_edi(%rip), %rdi 123 movq pt_regs_rcx(%rax), %rcx
124 movq saved_context_r08(%rip), %r8 124 movq pt_regs_rdx(%rax), %rdx
125 movq saved_context_r09(%rip), %r9 125 movq pt_regs_r8(%rax), %r8
126 movq saved_context_r10(%rip), %r10 126 movq pt_regs_r9(%rax), %r9
127 movq saved_context_r11(%rip), %r11 127 movq pt_regs_r10(%rax), %r10
128 movq saved_context_r12(%rip), %r12 128 movq pt_regs_r11(%rax), %r11
129 movq saved_context_r13(%rip), %r13 129 movq pt_regs_r12(%rax), %r12
130 movq saved_context_r14(%rip), %r14 130 movq pt_regs_r13(%rax), %r13
131 movq saved_context_r15(%rip), %r15 131 movq pt_regs_r14(%rax), %r14
132 pushq saved_context_eflags(%rip) ; popfq 132 movq pt_regs_r15(%rax), %r15
133 pushq pt_regs_eflags(%rax)
134 popfq
133 135
134 xorq %rax, %rax 136 xorq %rax, %rax
135 137
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index d78444c788a3..9ebc0dab66b4 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -131,38 +131,43 @@ unsigned long native_calculate_cpu_khz(void)
131{ 131{
132 unsigned long long start, end; 132 unsigned long long start, end;
133 unsigned long count; 133 unsigned long count;
134 u64 delta64; 134 u64 delta64 = (u64)ULLONG_MAX;
135 int i; 135 int i;
136 unsigned long flags; 136 unsigned long flags;
137 137
138 local_irq_save(flags); 138 local_irq_save(flags);
139 139
140 /* run 3 times to ensure the cache is warm */ 140 /* run 3 times to ensure the cache is warm and to get an accurate reading */
141 for (i = 0; i < 3; i++) { 141 for (i = 0; i < 3; i++) {
142 mach_prepare_counter(); 142 mach_prepare_counter();
143 rdtscll(start); 143 rdtscll(start);
144 mach_countup(&count); 144 mach_countup(&count);
145 rdtscll(end); 145 rdtscll(end);
146 }
147 /*
148 * Error: ECTCNEVERSET
149 * The CTC wasn't reliable: we got a hit on the very first read,
150 * or the CPU was so fast/slow that the quotient wouldn't fit in
151 * 32 bits..
152 */
153 if (count <= 1)
154 goto err;
155 146
156 delta64 = end - start; 147 /*
148 * Error: ECTCNEVERSET
149 * The CTC wasn't reliable: we got a hit on the very first read,
150 * or the CPU was so fast/slow that the quotient wouldn't fit in
151 * 32 bits..
152 */
153 if (count <= 1)
154 continue;
155
156 /* cpu freq too slow: */
157 if ((end - start) <= CALIBRATE_TIME_MSEC)
158 continue;
159
160 /*
161 * We want the minimum time of all runs in case one of them
162 * is inaccurate due to SMI or other delay
163 */
164 delta64 = min(delta64, (end - start));
165 }
157 166
158 /* cpu freq too fast: */ 167 /* cpu freq too fast (or every run was bad): */
159 if (delta64 > (1ULL<<32)) 168 if (delta64 > (1ULL<<32))
160 goto err; 169 goto err;
161 170
162 /* cpu freq too slow: */
163 if (delta64 <= CALIBRATE_TIME_MSEC)
164 goto err;
165
166 delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */ 171 delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
167 do_div(delta64,CALIBRATE_TIME_MSEC); 172 do_div(delta64,CALIBRATE_TIME_MSEC);
168 173
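
The rewritten calibration loop no longer aborts on a bad run: a run whose reference counter never ticked or whose delta is implausibly short is skipped, and of the remaining runs the smallest TSC delta wins, since a run stretched by an SMI or other delay can only be too long, never too short. Below is a userspace-flavoured sketch of that min-of-N pattern; the timing helpers are invented placeholders for rdtscll()/mach_countup(), not kernel APIs.

/* Min-of-N calibration sketch; clock_source() and busy_reference_delay()
 * are assumed placeholders for the kernel's TSC read and PIT count-up. */
#include <stdint.h>

#define CALIBRATION_RUNS     3
#define MIN_PLAUSIBLE_DELTA  1000ULL    /* assumed sanity bound */

extern uint64_t clock_source(void);
extern unsigned long busy_reference_delay(void);

static uint64_t calibrate_delta(void)
{
        uint64_t best = UINT64_MAX;
        int i;

        for (i = 0; i < CALIBRATION_RUNS; i++) {
                uint64_t start = clock_source();
                unsigned long count = busy_reference_delay();
                uint64_t end = clock_source();

                if (count <= 1)                          /* reference timer unreliable */
                        continue;
                if (end - start <= MIN_PLAUSIBLE_DELTA)  /* run clearly too short */
                        continue;

                /* A delayed run can only overestimate the delta, so keep
                 * the minimum across all accepted runs. */
                if (end - start < best)
                        best = end - start;
        }
        return best;    /* still UINT64_MAX if every run was rejected */
}
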
diff --git a/arch/x86/oprofile/Kconfig b/arch/x86/oprofile/Kconfig
deleted file mode 100644
index d8a84088471a..000000000000
--- a/arch/x86/oprofile/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
1config PROFILING
2 bool "Profiling support (EXPERIMENTAL)"
3 help
4 Say Y here to enable the extended profiling support mechanisms used
5 by profilers such as OProfile.
6
7
8config OPROFILE
9 tristate "OProfile system profiling (EXPERIMENTAL)"
10 depends on PROFILING
11 help
12 OProfile is a profiling system capable of profiling the
13 whole system, include the kernel, kernel modules, libraries,
14 and applications.
15
16 If unsure, say N.
17
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index c2d24991bb2b..308970aa5382 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -833,6 +833,8 @@ source "drivers/firmware/Kconfig"
833 833
834source fs/Kconfig 834source fs/Kconfig
835 835
836source "kernel/Kconfig.instrumentation"
837
836source "arch/x86_64/Kconfig.debug" 838source "arch/x86_64/Kconfig.debug"
837 839
838source "security/Kconfig" 840source "security/Kconfig"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 6d89ab762ffc..20eb69bd5a6d 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -24,6 +24,12 @@
24# Fill in SRCARCH 24# Fill in SRCARCH
25SRCARCH := x86 25SRCARCH := x86
26 26
27# BITS is used as extension for files which are available in a 32 bit
28# and a 64 bit version to simplify shared Makefiles.
29# e.g.: obj-y += foo_$(BITS).o
30BITS := 64
31export BITS
32
27LDFLAGS := -m elf_x86_64 33LDFLAGS := -m elf_x86_64
28OBJCOPYFLAGS := -O binary -R .note -R .comment -S 34OBJCOPYFLAGS := -O binary -R .note -R .comment -S
29LDFLAGS_vmlinux := 35LDFLAGS_vmlinux :=
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 5e3539c129b9..12db5a1cdd74 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -11,41 +11,16 @@ header-y += sigcontext32.h
11header-y += ucontext.h 11header-y += ucontext.h
12header-y += vsyscall32.h 12header-y += vsyscall32.h
13 13
14unifdef-y += a.out_32.h
15unifdef-y += a.out_64.h
16unifdef-y += byteorder_32.h
17unifdef-y += byteorder_64.h
18unifdef-y += e820.h 14unifdef-y += e820.h
19unifdef-y += elf_32.h
20unifdef-y += elf_64.h
21unifdef-y += ist.h 15unifdef-y += ist.h
22unifdef-y += mce.h 16unifdef-y += mce.h
23unifdef-y += msgbuf_32.h
24unifdef-y += msgbuf_64.h
25unifdef-y += msr_32.h
26unifdef-y += msr_64.h
27unifdef-y += msr.h 17unifdef-y += msr.h
28unifdef-y += mtrr_32.h
29unifdef-y += mtrr_64.h
30unifdef-y += mtrr.h 18unifdef-y += mtrr.h
31unifdef-y += page_32.h 19unifdef-y += page_32.h
32unifdef-y += page_64.h 20unifdef-y += page_64.h
33unifdef-y += posix_types_32.h 21unifdef-y += posix_types_32.h
34unifdef-y += posix_types_64.h 22unifdef-y += posix_types_64.h
35unifdef-y += ptrace_32.h 23unifdef-y += ptrace.h
36unifdef-y += ptrace_64.h
37unifdef-y += setup_32.h
38unifdef-y += setup_64.h
39unifdef-y += shmbuf_32.h
40unifdef-y += shmbuf_64.h
41unifdef-y += sigcontext_32.h
42unifdef-y += sigcontext_64.h
43unifdef-y += signal_32.h
44unifdef-y += signal_64.h
45unifdef-y += stat_32.h
46unifdef-y += stat_64.h
47unifdef-y += statfs_32.h
48unifdef-y += statfs_64.h
49unifdef-y += unistd_32.h 24unifdef-y += unistd_32.h
50unifdef-y += unistd_64.h 25unifdef-y += unistd_64.h
51unifdef-y += user_32.h 26unifdef-y += user_32.h
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
index 5bc9b1d3b227..a62443e38eb8 100644
--- a/include/asm-x86/a.out.h
+++ b/include/asm-x86/a.out.h
@@ -1,13 +1,30 @@
1#ifndef _ASM_X86_A_OUT_H
2#define _ASM_X86_A_OUT_H
3
4struct exec
5{
6 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
1#ifdef __KERNEL__ 20#ifdef __KERNEL__
21# include <linux/thread_info.h>
22# define STACK_TOP TASK_SIZE
2# ifdef CONFIG_X86_32 23# ifdef CONFIG_X86_32
3# include "a.out_32.h" 24# define STACK_TOP_MAX STACK_TOP
4# else 25# else
5# include "a.out_64.h" 26# define STACK_TOP_MAX TASK_SIZE64
6# endif
7#else
8# ifdef __i386__
9# include "a.out_32.h"
10# else
11# include "a.out_64.h"
12# endif 27# endif
13#endif 28#endif
29
30#endif /* _ASM_X86_A_OUT_H */
diff --git a/include/asm-x86/a.out_32.h b/include/asm-x86/a.out_32.h
deleted file mode 100644
index 851a60f8258c..000000000000
--- a/include/asm-x86/a.out_32.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef __I386_A_OUT_H__
2#define __I386_A_OUT_H__
3
4struct exec
5{
6 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
20#ifdef __KERNEL__
21
22#define STACK_TOP TASK_SIZE
23#define STACK_TOP_MAX STACK_TOP
24
25#endif
26
27#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86/a.out_64.h b/include/asm-x86/a.out_64.h
deleted file mode 100644
index e789300e41a5..000000000000
--- a/include/asm-x86/a.out_64.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __X8664_A_OUT_H__
2#define __X8664_A_OUT_H__
3
4/* 32bit a.out */
5
6struct exec
7{
8 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
9 unsigned a_text; /* length of text, in bytes */
10 unsigned a_data; /* length of data, in bytes */
11 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
12 unsigned a_syms; /* length of symbol table data in file, in bytes */
13 unsigned a_entry; /* start address */
14 unsigned a_trsize; /* length of relocation info for text, in bytes */
15 unsigned a_drsize; /* length of relocation info for data, in bytes */
16};
17
18#define N_TRSIZE(a) ((a).a_trsize)
19#define N_DRSIZE(a) ((a).a_drsize)
20#define N_SYMSIZE(a) ((a).a_syms)
21
22#ifdef __KERNEL__
23#include <linux/thread_info.h>
24#define STACK_TOP TASK_SIZE
25#define STACK_TOP_MAX TASK_SIZE64
26#endif
27
28#endif /* __A_OUT_GNU_H__ */
diff --git a/include/asm-x86/apic_64.h b/include/asm-x86/apic_64.h
index 3c8f21eef0be..2747a11a2b19 100644
--- a/include/asm-x86/apic_64.h
+++ b/include/asm-x86/apic_64.h
@@ -69,6 +69,7 @@ extern void clear_local_APIC (void);
69extern void connect_bsp_APIC (void); 69extern void connect_bsp_APIC (void);
70extern void disconnect_bsp_APIC (int virt_wire_setup); 70extern void disconnect_bsp_APIC (int virt_wire_setup);
71extern void disable_local_APIC (void); 71extern void disable_local_APIC (void);
72extern void lapic_shutdown (void);
72extern int verify_local_APIC (void); 73extern int verify_local_APIC (void);
73extern void cache_APIC_registers (void); 74extern void cache_APIC_registers (void);
74extern void sync_Arb_IDs (void); 75extern void sync_Arb_IDs (void);
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a341cf49..36ebb5b02b4f 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
80 :"Ir" (nr)); 80 :"Ir" (nr));
81} 81}
82 82
83/*
84 * clear_bit_unlock - Clears a bit in memory
85 * @nr: Bit to clear
86 * @addr: Address to start counting from
87 *
88 * clear_bit() is atomic and implies release semantics before the memory
89 * operation. It can be used for an unlock.
90 */
91static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
92{
93 barrier();
94 clear_bit(nr, addr);
95}
96
83static inline void __clear_bit(int nr, volatile unsigned long * addr) 97static inline void __clear_bit(int nr, volatile unsigned long * addr)
84{ 98{
85 __asm__ __volatile__( 99 __asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
87 :"+m" (ADDR) 101 :"+m" (ADDR)
88 :"Ir" (nr)); 102 :"Ir" (nr));
89} 103}
104
105/*
106 * __clear_bit_unlock - Clears a bit in memory
107 * @nr: Bit to clear
108 * @addr: Address to start counting from
109 *
110 * __clear_bit() is non-atomic and implies release semantics before the memory
111 * operation. It can be used for an unlock if no other CPUs can concurrently
112 * modify other bits in the word.
113 *
114 * No memory barrier is required here, because x86 cannot reorder stores past
115 * older loads. Same principle as spin_unlock.
116 */
117static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
118{
119 barrier();
120 __clear_bit(nr, addr);
121}
122
90#define smp_mb__before_clear_bit() barrier() 123#define smp_mb__before_clear_bit() barrier()
91#define smp_mb__after_clear_bit() barrier() 124#define smp_mb__after_clear_bit() barrier()
92 125
@@ -146,6 +179,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
146} 179}
147 180
148/** 181/**
182 * test_and_set_bit_lock - Set a bit and return its old value for lock
183 * @nr: Bit to set
184 * @addr: Address to count from
185 *
186 * This is the same as test_and_set_bit on x86
187 */
188#define test_and_set_bit_lock test_and_set_bit
189
190/**
149 * __test_and_set_bit - Set a bit and return its old value 191 * __test_and_set_bit - Set a bit and return its old value
150 * @nr: Bit to set 192 * @nr: Bit to set
151 * @addr: Address to count from 193 * @addr: Address to count from
@@ -406,7 +448,6 @@ static inline int fls(int x)
406} 448}
407 449
408#include <asm-generic/bitops/hweight.h> 450#include <asm-generic/bitops/hweight.h>
409#include <asm-generic/bitops/lock.h>
410 451
411#endif /* __KERNEL__ */ 452#endif /* __KERNEL__ */
412 453
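
The clear_bit_unlock()/__clear_bit_unlock()/test_and_set_bit_lock() helpers added here (and mirrored in bitops_64.h below) let a single bit act as a lock with acquire/release semantics; on x86 they reduce to the plain atomics plus a compiler barrier because the hardware ordering already suffices. A usage sketch follows, with the flag word and bit number invented for illustration (in-kernel code would normally reach these helpers through <linux/bitops.h>).

/* Lock-flavoured bitops usage sketch; MY_BUSY_BIT and my_flags are
 * made-up names, not kernel symbols. */
#define MY_BUSY_BIT 0

static unsigned long my_flags;          /* shared state word (assumed) */

static void my_do_work(void);           /* assumed worker */

static int my_try_work(void)
{
        /* Acquire: the old bit value is returned, so non-zero means the
         * "lock" bit was already held by someone else. */
        if (test_and_set_bit_lock(MY_BUSY_BIT, &my_flags))
                return 0;

        my_do_work();

        /* Release: clears the bit with release semantics. */
        clear_bit_unlock(MY_BUSY_BIT, &my_flags);
        return 1;
}
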
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f1febc..b4d47940b959 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
72 :"dIr" (nr)); 72 :"dIr" (nr));
73} 73}
74 74
75/*
76 * clear_bit_unlock - Clears a bit in memory
77 * @nr: Bit to clear
78 * @addr: Address to start counting from
79 *
80 * clear_bit() is atomic and implies release semantics before the memory
81 * operation. It can be used for an unlock.
82 */
83static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
84{
85 barrier();
86 clear_bit(nr, addr);
87}
88
75static __inline__ void __clear_bit(int nr, volatile void * addr) 89static __inline__ void __clear_bit(int nr, volatile void * addr)
76{ 90{
77 __asm__ __volatile__( 91 __asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
80 :"dIr" (nr)); 94 :"dIr" (nr));
81} 95}
82 96
97/*
98 * __clear_bit_unlock - Clears a bit in memory
99 * @nr: Bit to clear
100 * @addr: Address to start counting from
101 *
102 * __clear_bit() is non-atomic and implies release semantics before the memory
103 * operation. It can be used for an unlock if no other CPUs can concurrently
104 * modify other bits in the word.
105 *
106 * No memory barrier is required here, because x86 cannot reorder stores past
107 * older loads. Same principle as spin_unlock.
108 */
109static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
110{
111 barrier();
112 __clear_bit(nr, addr);
113}
114
83#define smp_mb__before_clear_bit() barrier() 115#define smp_mb__before_clear_bit() barrier()
84#define smp_mb__after_clear_bit() barrier() 116#define smp_mb__after_clear_bit() barrier()
85 117
@@ -137,6 +169,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
137} 169}
138 170
139/** 171/**
172 * test_and_set_bit_lock - Set a bit and return its old value for lock
173 * @nr: Bit to set
174 * @addr: Address to count from
175 *
176 * This is the same as test_and_set_bit on x86
177 */
178#define test_and_set_bit_lock test_and_set_bit
179
180/**
140 * __test_and_set_bit - Set a bit and return its old value 181 * __test_and_set_bit - Set a bit and return its old value
141 * @nr: Bit to set 182 * @nr: Bit to set
142 * @addr: Address to count from 183 * @addr: Address to count from
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
412#define ARCH_HAS_FAST_MULTIPLIER 1 453#define ARCH_HAS_FAST_MULTIPLIER 1
413 454
414#include <asm-generic/bitops/hweight.h> 455#include <asm-generic/bitops/hweight.h>
415#include <asm-generic/bitops/lock.h>
416 456
417#endif /* __KERNEL__ */ 457#endif /* __KERNEL__ */
418 458
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index eb14b1870ed7..1f2d6d5bf20d 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -1,13 +1,72 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_BYTEORDER_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_BYTEORDER_H
3# include "byteorder_32.h" 3
4# else 4#include <asm/types.h>
5# include "byteorder_64.h" 5#include <linux/compiler.h>
6# endif 6
7#ifdef __GNUC__
8
9#ifdef __i386__
10
11static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
12{
13#ifdef CONFIG_X86_BSWAP
14 __asm__("bswap %0" : "=r" (x) : "0" (x));
7#else 15#else
8# ifdef __i386__ 16 __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
9# include "byteorder_32.h" 17 "rorl $16,%0\n\t" /* swap words */
10# else 18 "xchgb %b0,%h0" /* swap higher bytes */
11# include "byteorder_64.h" 19 :"=q" (x)
12# endif 20 : "0" (x));
13#endif 21#endif
22 return x;
23}
24
25static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
26{
27 union {
28 struct { __u32 a,b; } s;
29 __u64 u;
30 } v;
31 v.u = val;
32#ifdef CONFIG_X86_BSWAP
33 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
34 : "=r" (v.s.a), "=r" (v.s.b)
35 : "0" (v.s.a), "1" (v.s.b));
36#else
37 v.s.a = ___arch__swab32(v.s.a);
38 v.s.b = ___arch__swab32(v.s.b);
39 asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
40#endif
41 return v.u;
42}
43
44#else /* __i386__ */
45
46static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
47{
48 __asm__("bswapq %0" : "=r" (x) : "0" (x));
49 return x;
50}
51
52static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
53{
54 __asm__("bswapl %0" : "=r" (x) : "0" (x));
55 return x;
56}
57
58#endif
59
60/* Do not define swab16. Gcc is smart enough to recognize "C" version and
61 convert it into rotation or exhange. */
62
63#define __arch__swab64(x) ___arch__swab64(x)
64#define __arch__swab32(x) ___arch__swab32(x)
65
66#define __BYTEORDER_HAS_U64__
67
68#endif /* __GNUC__ */
69
70#include <linux/byteorder/little_endian.h>
71
72#endif /* _ASM_X86_BYTEORDER_H */
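
The merged byteorder.h keeps both arch-specific swab implementations (bswap where CONFIG_X86_BSWAP allows it, the xchgb/rorl fallback otherwise, and bswapq/bswapl on 64-bit) and then includes linux/byteorder/little_endian.h, which layers the usual cpu_to_le32()/cpu_to_be32() helpers on top of __arch__swab32/64. A plain-C reference for what the 32-bit swap computes, illustration only:

/* Reference behaviour of a 32-bit byte swap, equivalent to the inline
 * assembly variants above. */
#include <stdint.h>

static inline uint32_t swab32_ref(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
}

/* swab32_ref(0x12345678) == 0x78563412; on little-endian x86 this is
 * exactly what cpu_to_be32(0x12345678) produces. */
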
diff --git a/include/asm-x86/byteorder_32.h b/include/asm-x86/byteorder_32.h
deleted file mode 100644
index a45470a8b74a..000000000000
--- a/include/asm-x86/byteorder_32.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _I386_BYTEORDER_H
2#define _I386_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9/* For avoiding bswap on i386 */
10#ifdef __KERNEL__
11#endif
12
13static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
14{
15#ifdef CONFIG_X86_BSWAP
16 __asm__("bswap %0" : "=r" (x) : "0" (x));
17#else
18 __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
19 "rorl $16,%0\n\t" /* swap words */
20 "xchgb %b0,%h0" /* swap higher bytes */
21 :"=q" (x)
22 : "0" (x));
23#endif
24 return x;
25}
26
27static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
28{
29 union {
30 struct { __u32 a,b; } s;
31 __u64 u;
32 } v;
33 v.u = val;
34#ifdef CONFIG_X86_BSWAP
35 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
36 : "=r" (v.s.a), "=r" (v.s.b)
37 : "0" (v.s.a), "1" (v.s.b));
38#else
39 v.s.a = ___arch__swab32(v.s.a);
40 v.s.b = ___arch__swab32(v.s.b);
41 asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
42#endif
43 return v.u;
44}
45
46/* Do not define swab16. Gcc is smart enough to recognize "C" version and
47 convert it into rotation or exhange. */
48
49#define __arch__swab64(x) ___arch__swab64(x)
50#define __arch__swab32(x) ___arch__swab32(x)
51
52#define __BYTEORDER_HAS_U64__
53
54#endif /* __GNUC__ */
55
56#include <linux/byteorder/little_endian.h>
57
58#endif /* _I386_BYTEORDER_H */
diff --git a/include/asm-x86/byteorder_64.h b/include/asm-x86/byteorder_64.h
deleted file mode 100644
index 5e86c868c75e..000000000000
--- a/include/asm-x86/byteorder_64.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef _X86_64_BYTEORDER_H
2#define _X86_64_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
10{
11 __asm__("bswapq %0" : "=r" (x) : "0" (x));
12 return x;
13}
14
15static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
16{
17 __asm__("bswapl %0" : "=r" (x) : "0" (x));
18 return x;
19}
20
21/* Do not define swab16. Gcc is smart enough to recognize "C" version and
22 convert it into rotation or exhange. */
23
24#define __arch__swab32(x) ___arch__swab32(x)
25#define __arch__swab64(x) ___arch__swab64(x)
26
27#endif /* __GNUC__ */
28
29#define __BYTEORDER_HAS_U64__
30
31#include <linux/byteorder/little_endian.h>
32
33#endif /* _X86_64_BYTEORDER_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 8ac7da6ca284..e98d16e7a37a 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -1,5 +1,59 @@
1#ifndef _ASM_X86_DIV64_H
2#define _ASM_X86_DIV64_H
3
1#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
2# include "div64_32.h" 5
6#include <linux/types.h>
7
8/*
9 * do_div() is NOT a C function. It wants to return
10 * two values (the quotient and the remainder), but
11 * since that doesn't work very well in C, what it
12 * does is:
13 *
14 * - modifies the 64-bit dividend _in_place_
15 * - returns the 32-bit remainder
16 *
17 * This ends up being the most efficient "calling
18 * convention" on x86.
19 */
20#define do_div(n,base) ({ \
21 unsigned long __upper, __low, __high, __mod, __base; \
22 __base = (base); \
23 asm("":"=a" (__low), "=d" (__high):"A" (n)); \
24 __upper = __high; \
25 if (__high) { \
26 __upper = __high % (__base); \
27 __high = __high / (__base); \
28 } \
29 asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
30 asm("":"=A" (n):"a" (__low),"d" (__high)); \
31 __mod; \
32})
33
34/*
35 * (long)X = ((long long)divs) / (long)div
36 * (long)rem = ((long long)divs) % (long)div
37 *
38 * Warning, this will raise an exception if X overflows.
39 */
40#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
41
42static inline long
43div_ll_X_l_rem(long long divs, long div, long *rem)
44{
45 long dum2;
46 __asm__("divl %2":"=a"(dum2), "=d"(*rem)
47 : "rm"(div), "A"(divs));
48
49 return dum2;
50
51}
52
53extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
54
3#else 55#else
4# include "div64_64.h" 56# include <asm-generic/div64.h>
5#endif 57#endif /* CONFIG_X86_32 */
58
59#endif /* _ASM_X86_DIV64_H */
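
As the comment above stresses, do_div() is a macro rather than a function: it divides the 64-bit dividend in place by a 32-bit base and evaluates to the 32-bit remainder, which is exactly how the tsc_32.c hunk earlier uses do_div(delta64, CALIBRATE_TIME_MSEC). A usage sketch, assuming kernel context; the function and variable names are invented.

/* do_div() usage sketch: split a nanosecond count into seconds plus a
 * remainder. The names here are illustrative only. */
#include <asm/div64.h>          /* do_div() */
#include <linux/types.h>        /* u64, u32 */

static void split_ns(u64 ns, u64 *secs, u32 *rem_ns)
{
        u64 tmp = ns;
        u32 rem = do_div(tmp, 1000000000u);  /* tmp is now ns / 1e9 */

        *secs   = tmp;          /* quotient, left in the dividend */
        *rem_ns = rem;          /* remainder, the macro's value */
}
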
diff --git a/include/asm-x86/div64_32.h b/include/asm-x86/div64_32.h
deleted file mode 100644
index 438e980068bd..000000000000
--- a/include/asm-x86/div64_32.h
+++ /dev/null
@@ -1,52 +0,0 @@
1#ifndef __I386_DIV64
2#define __I386_DIV64
3
4#include <linux/types.h>
5
6/*
7 * do_div() is NOT a C function. It wants to return
8 * two values (the quotient and the remainder), but
9 * since that doesn't work very well in C, what it
10 * does is:
11 *
12 * - modifies the 64-bit dividend _in_place_
13 * - returns the 32-bit remainder
14 *
15 * This ends up being the most efficient "calling
16 * convention" on x86.
17 */
18#define do_div(n,base) ({ \
19 unsigned long __upper, __low, __high, __mod, __base; \
20 __base = (base); \
21 asm("":"=a" (__low), "=d" (__high):"A" (n)); \
22 __upper = __high; \
23 if (__high) { \
24 __upper = __high % (__base); \
25 __high = __high / (__base); \
26 } \
27 asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
28 asm("":"=A" (n):"a" (__low),"d" (__high)); \
29 __mod; \
30})
31
32/*
33 * (long)X = ((long long)divs) / (long)div
34 * (long)rem = ((long long)divs) % (long)div
35 *
36 * Warning, this will do an exception if X overflows.
37 */
38#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
39
40static inline long
41div_ll_X_l_rem(long long divs, long div, long *rem)
42{
43 long dum2;
44 __asm__("divl %2":"=a"(dum2), "=d"(*rem)
45 : "rm"(div), "A"(divs));
46
47 return dum2;
48
49}
50
51extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
52#endif
diff --git a/include/asm-x86/div64_64.h b/include/asm-x86/div64_64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/include/asm-x86/div64_64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index ed6bb6e546b9..ec42a4d2e83b 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -1,13 +1,290 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_ELF_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_ELF_H
3# include "elf_32.h" 3
4# else 4/*
5# include "elf_64.h" 5 * ELF register definitions..
6# endif 6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/auxvec.h>
11
12typedef unsigned long elf_greg_t;
13
14#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
15typedef elf_greg_t elf_gregset_t[ELF_NGREG];
16
17typedef struct user_i387_struct elf_fpregset_t;
18
19#ifdef __i386__
20
21typedef struct user_fxsr_struct elf_fpxregset_t;
22
23#define R_386_NONE 0
24#define R_386_32 1
25#define R_386_PC32 2
26#define R_386_GOT32 3
27#define R_386_PLT32 4
28#define R_386_COPY 5
29#define R_386_GLOB_DAT 6
30#define R_386_JMP_SLOT 7
31#define R_386_RELATIVE 8
32#define R_386_GOTOFF 9
33#define R_386_GOTPC 10
34#define R_386_NUM 11
35
36/*
37 * These are used to set parameters in the core dumps.
38 */
39#define ELF_CLASS ELFCLASS32
40#define ELF_DATA ELFDATA2LSB
41#define ELF_ARCH EM_386
42
7#else 43#else
8# ifdef __i386__ 44
9# include "elf_32.h" 45/* x86-64 relocation types */
10# else 46#define R_X86_64_NONE 0 /* No reloc */
11# include "elf_64.h" 47#define R_X86_64_64 1 /* Direct 64 bit */
12# endif 48#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
49#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
50#define R_X86_64_PLT32 4 /* 32 bit PLT address */
51#define R_X86_64_COPY 5 /* Copy symbol at runtime */
52#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
53#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
54#define R_X86_64_RELATIVE 8 /* Adjust by program base */
55#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
56 offset to GOT */
57#define R_X86_64_32 10 /* Direct 32 bit zero extended */
58#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
59#define R_X86_64_16 12 /* Direct 16 bit zero extended */
60#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
61#define R_X86_64_8 14 /* Direct 8 bit sign extended */
62#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
63
64#define R_X86_64_NUM 16
65
66/*
67 * These are used to set parameters in the core dumps.
68 */
69#define ELF_CLASS ELFCLASS64
70#define ELF_DATA ELFDATA2LSB
71#define ELF_ARCH EM_X86_64
72
73#endif
74
75#ifdef __KERNEL__
76
77#ifdef CONFIG_X86_32
78#include <asm/processor.h>
79#include <asm/system.h> /* for savesegment */
80#include <asm/desc.h>
81
82/*
83 * This is used to ensure we don't load something for the wrong architecture.
84 */
85#define elf_check_arch(x) \
86 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
87
88/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
89 contains a pointer to a function which might be registered using `atexit'.
90 This provides a means for the dynamic linker to call DT_FINI functions for
91 shared libraries that have been loaded before the code runs.
92
93 A value of 0 tells us we have no such handler.
94
95 We might as well make sure everything else is cleared too (except for %esp),
96 just to make things more deterministic.
97 */
98#define ELF_PLAT_INIT(_r, load_addr) do { \
99 _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
100 _r->esi = 0; _r->edi = 0; _r->ebp = 0; \
101 _r->eax = 0; \
102} while (0)
103
104/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
105 now struct_user_regs, they are different) */
106
107#define ELF_CORE_COPY_REGS(pr_reg, regs) \
108 pr_reg[0] = regs->ebx; \
109 pr_reg[1] = regs->ecx; \
110 pr_reg[2] = regs->edx; \
111 pr_reg[3] = regs->esi; \
112 pr_reg[4] = regs->edi; \
113 pr_reg[5] = regs->ebp; \
114 pr_reg[6] = regs->eax; \
115 pr_reg[7] = regs->xds & 0xffff; \
116 pr_reg[8] = regs->xes & 0xffff; \
117 pr_reg[9] = regs->xfs & 0xffff; \
118 savesegment(gs,pr_reg[10]); \
119 pr_reg[11] = regs->orig_eax; \
120 pr_reg[12] = regs->eip; \
121 pr_reg[13] = regs->xcs & 0xffff; \
122 pr_reg[14] = regs->eflags; \
123 pr_reg[15] = regs->esp; \
124 pr_reg[16] = regs->xss & 0xffff;
125
126#define ELF_PLATFORM (utsname()->machine)
127#define set_personality_64bit() do { } while (0)
128extern unsigned int vdso_enabled;
129
130#else /* CONFIG_X86_32 */
131
132#include <asm/processor.h>
133
134/*
135 * This is used to ensure we don't load something for the wrong architecture.
136 */
137#define elf_check_arch(x) \
138 ((x)->e_machine == EM_X86_64)
139
140#define ELF_PLAT_INIT(_r, load_addr) do { \
141 struct task_struct *cur = current; \
142 (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
143 (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
144 (_r)->rax = 0; \
145 (_r)->r8 = 0; \
146 (_r)->r9 = 0; \
147 (_r)->r10 = 0; \
148 (_r)->r11 = 0; \
149 (_r)->r12 = 0; \
150 (_r)->r13 = 0; \
151 (_r)->r14 = 0; \
152 (_r)->r15 = 0; \
153 cur->thread.fs = 0; cur->thread.gs = 0; \
154 cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
155 cur->thread.ds = 0; cur->thread.es = 0; \
156 clear_thread_flag(TIF_IA32); \
157} while (0)
158
159/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
160 now struct_user_regs, they are different). Assumes current is the process
161 getting dumped. */
162
163#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
164 unsigned v; \
165 (pr_reg)[0] = (regs)->r15; \
166 (pr_reg)[1] = (regs)->r14; \
167 (pr_reg)[2] = (regs)->r13; \
168 (pr_reg)[3] = (regs)->r12; \
169 (pr_reg)[4] = (regs)->rbp; \
170 (pr_reg)[5] = (regs)->rbx; \
171 (pr_reg)[6] = (regs)->r11; \
172 (pr_reg)[7] = (regs)->r10; \
173 (pr_reg)[8] = (regs)->r9; \
174 (pr_reg)[9] = (regs)->r8; \
175 (pr_reg)[10] = (regs)->rax; \
176 (pr_reg)[11] = (regs)->rcx; \
177 (pr_reg)[12] = (regs)->rdx; \
178 (pr_reg)[13] = (regs)->rsi; \
179 (pr_reg)[14] = (regs)->rdi; \
180 (pr_reg)[15] = (regs)->orig_rax; \
181 (pr_reg)[16] = (regs)->rip; \
182 (pr_reg)[17] = (regs)->cs; \
183 (pr_reg)[18] = (regs)->eflags; \
184 (pr_reg)[19] = (regs)->rsp; \
185 (pr_reg)[20] = (regs)->ss; \
186 (pr_reg)[21] = current->thread.fs; \
187 (pr_reg)[22] = current->thread.gs; \
188 asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
189 asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
190 asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
191 asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
192} while(0);
193
194/* I'm not sure if we can use '-' here */
195#define ELF_PLATFORM ("x86_64")
196extern void set_personality_64bit(void);
197extern int vdso_enabled;
198
199#endif /* !CONFIG_X86_32 */
200
201#define USE_ELF_CORE_DUMP
202#define ELF_EXEC_PAGESIZE 4096
203
204/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
205 use of this is to invoke "./ld.so someprog" to test out a new version of
206 the loader. We need to make sure that it is out of the way of the program
207 that it will "exec", and that there is sufficient room for the brk. */
208
209#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
210
211/* This yields a mask that user programs can use to figure out what
212 instruction set this CPU supports. This could be done in user space,
213 but it's not easy, and we've already done it here. */
214
215#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
216
217/* This yields a string that ld.so will use to load implementation
218 specific libraries for optimization. This is more specific in
219 intent than poking at uname or /proc/cpuinfo.
220
221 For the moment, we have only optimizations for the Intel generations,
222 but that could change... */
223
224#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
225
226/*
227 * An executable for which elf_read_implies_exec() returns TRUE will
228 * have the READ_IMPLIES_EXEC personality flag set automatically.
229 */
230#define elf_read_implies_exec(ex, executable_stack) \
231 (executable_stack != EXSTACK_DISABLE_X)
232
233struct task_struct;
234
235extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
236extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
237
238#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
239#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
240
241#ifdef CONFIG_X86_32
242extern int dump_task_extended_fpu (struct task_struct *,
243 struct user_fxsr_struct *);
244#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) \
245 dump_task_extended_fpu(tsk, elf_xfpregs)
246#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
247
248#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
249#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
250#define VDSO_PRELINK 0
251
252#define VDSO_SYM(x) \
253 (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
254
255#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
256#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE)
257
258extern void __kernel_vsyscall;
259
260#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
261
262/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
263
264#define ARCH_DLINFO \
265do if (vdso_enabled) { \
266 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
267 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
268} while (0)
269
270#else /* CONFIG_X86_32 */
271
272/* 1GB for 64bit, 8MB for 32bit */
273#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
274
275#define ARCH_DLINFO \
276do if (vdso_enabled) { \
277 NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
278} while (0)
279
280#endif /* !CONFIG_X86_32 */
281
282struct linux_binprm;
283
284#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
285extern int arch_setup_additional_pages(struct linux_binprm *bprm,
286 int executable_stack);
287
288#endif /* __KERNEL__ */
289
13#endif 290#endif
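
In the merged elf.h, elf_check_arch() is the test the loader applies to reject binaries built for the wrong machine, while ELF_CLASS/ELF_DATA feed the core-dump headers. Below is a simplified userspace-style sketch of the same kind of check against a raw ELF header, using the standard <elf.h> constants; this is not the binfmt_elf code path, just an illustration.

/* Hedged sketch: validate that a header plausibly belongs to an x86-64
 * little-endian ELF object, in the spirit of elf_check_arch(),
 * ELF_CLASS and ELF_DATA above. */
#include <elf.h>
#include <string.h>

static int looks_like_x86_64_elf(const Elf64_Ehdr *hdr)
{
        if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
                return 0;                          /* not ELF at all */
        if (hdr->e_ident[EI_CLASS] != ELFCLASS64)  /* cf. ELF_CLASS */
                return 0;
        if (hdr->e_ident[EI_DATA] != ELFDATA2LSB)  /* cf. ELF_DATA */
                return 0;
        return hdr->e_machine == EM_X86_64;        /* cf. elf_check_arch() */
}
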
diff --git a/include/asm-x86/elf_32.h b/include/asm-x86/elf_32.h
deleted file mode 100644
index b3f694eaaf37..000000000000
--- a/include/asm-x86/elf_32.h
+++ /dev/null
@@ -1,165 +0,0 @@
1#ifndef __ASMi386_ELF_H
2#define __ASMi386_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10#include <asm/auxvec.h>
11
12#define R_386_NONE 0
13#define R_386_32 1
14#define R_386_PC32 2
15#define R_386_GOT32 3
16#define R_386_PLT32 4
17#define R_386_COPY 5
18#define R_386_GLOB_DAT 6
19#define R_386_JMP_SLOT 7
20#define R_386_RELATIVE 8
21#define R_386_GOTOFF 9
22#define R_386_GOTPC 10
23#define R_386_NUM 11
24
25typedef unsigned long elf_greg_t;
26
27#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
28typedef elf_greg_t elf_gregset_t[ELF_NGREG];
29
30typedef struct user_i387_struct elf_fpregset_t;
31typedef struct user_fxsr_struct elf_fpxregset_t;
32
33/*
34 * This is used to ensure we don't load something for the wrong architecture.
35 */
36#define elf_check_arch(x) \
37 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
38
39/*
40 * These are used to set parameters in the core dumps.
41 */
42#define ELF_CLASS ELFCLASS32
43#define ELF_DATA ELFDATA2LSB
44#define ELF_ARCH EM_386
45
46#ifdef __KERNEL__
47
48#include <asm/processor.h>
49#include <asm/system.h> /* for savesegment */
50#include <asm/desc.h>
51
52/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
53 contains a pointer to a function which might be registered using `atexit'.
54 This provides a mean for the dynamic linker to call DT_FINI functions for
55 shared libraries that have been loaded before the code runs.
56
57 A value of 0 tells we have no such handler.
58
59 We might as well make sure everything else is cleared too (except for %esp),
60 just to make things more deterministic.
61 */
62#define ELF_PLAT_INIT(_r, load_addr) do { \
63 _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
64 _r->esi = 0; _r->edi = 0; _r->ebp = 0; \
65 _r->eax = 0; \
66} while (0)
67
68#define USE_ELF_CORE_DUMP
69#define ELF_EXEC_PAGESIZE 4096
70
71/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
72 use of this is to invoke "./ld.so someprog" to test out a new version of
73 the loader. We need to make sure that it is out of the way of the program
74 that it will "exec", and that there is sufficient room for the brk. */
75
76#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
77
78/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
79 now struct_user_regs, they are different) */
80
81#define ELF_CORE_COPY_REGS(pr_reg, regs) \
82 pr_reg[0] = regs->ebx; \
83 pr_reg[1] = regs->ecx; \
84 pr_reg[2] = regs->edx; \
85 pr_reg[3] = regs->esi; \
86 pr_reg[4] = regs->edi; \
87 pr_reg[5] = regs->ebp; \
88 pr_reg[6] = regs->eax; \
89 pr_reg[7] = regs->xds & 0xffff; \
90 pr_reg[8] = regs->xes & 0xffff; \
91 pr_reg[9] = regs->xfs & 0xffff; \
92 savesegment(gs,pr_reg[10]); \
93 pr_reg[11] = regs->orig_eax; \
94 pr_reg[12] = regs->eip; \
95 pr_reg[13] = regs->xcs & 0xffff; \
96 pr_reg[14] = regs->eflags; \
97 pr_reg[15] = regs->esp; \
98 pr_reg[16] = regs->xss & 0xffff;
99
100/* This yields a mask that user programs can use to figure out what
101 instruction set this CPU supports. This could be done in user space,
102 but it's not easy, and we've already done it here. */
103
104#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
105
106/* This yields a string that ld.so will use to load implementation
107 specific libraries for optimization. This is more specific in
108 intent than poking at uname or /proc/cpuinfo.
109
110 For the moment, we have only optimizations for the Intel generations,
111 but that could change... */
112
113#define ELF_PLATFORM (utsname()->machine)
114
115#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
116
117/*
118 * An executable for which elf_read_implies_exec() returns TRUE will
119 * have the READ_IMPLIES_EXEC personality flag set automatically.
120 */
121#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
122
123struct task_struct;
124
125extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
126extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
127extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
128
129#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
130#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
131#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
132#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
133
134#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
135#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
136#define VDSO_PRELINK 0
137
138#define VDSO_SYM(x) \
139 (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK)
140
141#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
142#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE)
143
144extern void __kernel_vsyscall;
145
146#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
147
148struct linux_binprm;
149
150#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
151extern int arch_setup_additional_pages(struct linux_binprm *bprm,
152 int executable_stack);
153
154extern unsigned int vdso_enabled;
155
156/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
157#define ARCH_DLINFO \
158do if (vdso_enabled) { \
159 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
160 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
161} while (0)
162
163#endif
164
165#endif
diff --git a/include/asm-x86/elf_64.h b/include/asm-x86/elf_64.h
deleted file mode 100644
index b4fbe47f6ccd..000000000000
--- a/include/asm-x86/elf_64.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef __ASM_X86_64_ELF_H
2#define __ASM_X86_64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/user.h>
10
11/* x86-64 relocation types */
12#define R_X86_64_NONE 0 /* No reloc */
13#define R_X86_64_64 1 /* Direct 64 bit */
14#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
15#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
16#define R_X86_64_PLT32 4 /* 32 bit PLT address */
17#define R_X86_64_COPY 5 /* Copy symbol at runtime */
18#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
19#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
20#define R_X86_64_RELATIVE 8 /* Adjust by program base */
21#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
22 offset to GOT */
23#define R_X86_64_32 10 /* Direct 32 bit zero extended */
24#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
25#define R_X86_64_16 12 /* Direct 16 bit zero extended */
26#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
27#define R_X86_64_8 14 /* Direct 8 bit sign extended */
28#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
29
30#define R_X86_64_NUM 16
31
32typedef unsigned long elf_greg_t;
33
34#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
35typedef elf_greg_t elf_gregset_t[ELF_NGREG];
36
37typedef struct user_i387_struct elf_fpregset_t;
38
39/*
40 * These are used to set parameters in the core dumps.
41 */
42#define ELF_CLASS ELFCLASS64
43#define ELF_DATA ELFDATA2LSB
44#define ELF_ARCH EM_X86_64
45
46#ifdef __KERNEL__
47#include <asm/processor.h>
48
49/*
50 * This is used to ensure we don't load something for the wrong architecture.
51 */
52#define elf_check_arch(x) \
53 ((x)->e_machine == EM_X86_64)
54
55
56/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
57 contains a pointer to a function which might be registered using `atexit'.
58 This provides a mean for the dynamic linker to call DT_FINI functions for
59 shared libraries that have been loaded before the code runs.
60
61 A value of 0 tells we have no such handler.
62
63 We might as well make sure everything else is cleared too (except for %esp),
64 just to make things more deterministic.
65 */
66#define ELF_PLAT_INIT(_r, load_addr) do { \
67 struct task_struct *cur = current; \
68 (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
69 (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
70 (_r)->rax = 0; \
71 (_r)->r8 = 0; \
72 (_r)->r9 = 0; \
73 (_r)->r10 = 0; \
74 (_r)->r11 = 0; \
75 (_r)->r12 = 0; \
76 (_r)->r13 = 0; \
77 (_r)->r14 = 0; \
78 (_r)->r15 = 0; \
79 cur->thread.fs = 0; cur->thread.gs = 0; \
80 cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
81 cur->thread.ds = 0; cur->thread.es = 0; \
82 clear_thread_flag(TIF_IA32); \
83} while (0)
84
85#define USE_ELF_CORE_DUMP
86#define ELF_EXEC_PAGESIZE 4096
87
88/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
89 use of this is to invoke "./ld.so someprog" to test out a new version of
90 the loader. We need to make sure that it is out of the way of the program
91 that it will "exec", and that there is sufficient room for the brk. */
92
93#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
94
95/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
96 now struct_user_regs, they are different). Assumes current is the process
97 getting dumped. */
98
99#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
100 unsigned v; \
101 (pr_reg)[0] = (regs)->r15; \
102 (pr_reg)[1] = (regs)->r14; \
103 (pr_reg)[2] = (regs)->r13; \
104 (pr_reg)[3] = (regs)->r12; \
105 (pr_reg)[4] = (regs)->rbp; \
106 (pr_reg)[5] = (regs)->rbx; \
107 (pr_reg)[6] = (regs)->r11; \
108 (pr_reg)[7] = (regs)->r10; \
109 (pr_reg)[8] = (regs)->r9; \
110 (pr_reg)[9] = (regs)->r8; \
111 (pr_reg)[10] = (regs)->rax; \
112 (pr_reg)[11] = (regs)->rcx; \
113 (pr_reg)[12] = (regs)->rdx; \
114 (pr_reg)[13] = (regs)->rsi; \
115 (pr_reg)[14] = (regs)->rdi; \
116 (pr_reg)[15] = (regs)->orig_rax; \
117 (pr_reg)[16] = (regs)->rip; \
118 (pr_reg)[17] = (regs)->cs; \
119 (pr_reg)[18] = (regs)->eflags; \
120 (pr_reg)[19] = (regs)->rsp; \
121 (pr_reg)[20] = (regs)->ss; \
122 (pr_reg)[21] = current->thread.fs; \
123 (pr_reg)[22] = current->thread.gs; \
124 asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
125 asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
126 asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
127 asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
128} while(0);
129
130/* This yields a mask that user programs can use to figure out what
131 instruction set this CPU supports. This could be done in user space,
132 but it's not easy, and we've already done it here. */
133
134#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
135
136/* This yields a string that ld.so will use to load implementation
137 specific libraries for optimization. This is more specific in
138 intent than poking at uname or /proc/cpuinfo.
139
140 For the moment, we have only optimizations for the Intel generations,
141 but that could change... */
142
143/* I'm not sure if we can use '-' here */
144#define ELF_PLATFORM ("x86_64")
145
146extern void set_personality_64bit(void);
147#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
148/*
149 * An executable for which elf_read_implies_exec() returns TRUE will
150 * have the READ_IMPLIES_EXEC personality flag set automatically.
151 */
152#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
153
154struct task_struct;
155
156extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
157extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
158
159#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
160#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
161
162/* 1GB for 64bit, 8MB for 32bit */
163#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
164
165
166#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
167struct linux_binprm;
168extern int arch_setup_additional_pages(struct linux_binprm *bprm,
169 int executable_stack);
170
171extern int vdso_enabled;
172
173#define ARCH_DLINFO \
174do if (vdso_enabled) { \
175 NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
176} while (0)
177
178#endif
179
180#endif
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 9c628cd70e23..3f922c8e1c88 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -1,5 +1,23 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_MMU_H
2# include "mmu_32.h" 2#define _ASM_X86_MMU_H
3#else 3
4# include "mmu_64.h" 4#include <linux/spinlock.h>
5#include <linux/mutex.h>
6
7/*
8 * The x86 doesn't have a mmu context, but
9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */
13typedef struct {
14 void *ldt;
15#ifdef CONFIG_X86_64
16 rwlock_t ldtlock;
5#endif 17#endif
18 int size;
19 struct mutex lock;
20 void *vdso;
21} mm_context_t;
22
23#endif /* _ASM_X86_MMU_H */
diff --git a/include/asm-x86/mmu_32.h b/include/asm-x86/mmu_32.h
deleted file mode 100644
index 5e249c51ef56..000000000000
--- a/include/asm-x86/mmu_32.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef __i386_MMU_H
2#define __i386_MMU_H
3
4#include <linux/mutex.h>
5/*
6 * The i386 doesn't have a mmu context, but
7 * we put the segment information here.
8 *
9 * cpu_vm_mask is used to optimize ldt flushing.
10 */
11typedef struct {
12 int size;
13 struct mutex lock;
14 void *ldt;
15 void *vdso;
16} mm_context_t;
17
18#endif
diff --git a/include/asm-x86/mmu_64.h b/include/asm-x86/mmu_64.h
deleted file mode 100644
index 024357c27222..000000000000
--- a/include/asm-x86/mmu_64.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __x86_64_MMU_H
2#define __x86_64_MMU_H
3
4#include <linux/spinlock.h>
5#include <linux/mutex.h>
6
7/*
8 * The x86_64 doesn't have a mmu context, but
9 * we put the segment information here.
10 *
11 * cpu_vm_mask is used to optimize ldt flushing.
12 */
13typedef struct {
14 void *ldt;
15 rwlock_t ldtlock;
16 int size;
17 struct mutex lock;
18 void *vdso;
19} mm_context_t;
20
21#endif
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h
index 154f7d64e862..7e4e9481f51c 100644
--- a/include/asm-x86/msgbuf.h
+++ b/include/asm-x86/msgbuf.h
@@ -1,13 +1,39 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_MSGBUF_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_MSGBUF_H
3# include "msgbuf_32.h" 3
4# else 4/*
5# include "msgbuf_64.h" 5 * The msqid64_ds structure for i386 architecture.
6# endif 6 * Note extra padding because this structure is passed back and forth
7#else 7 * between kernel and user space.
8# ifdef __i386__ 8 *
9# include "msgbuf_32.h" 9 * Pad space on i386 is left for:
10# else 10 * - 64-bit time_t to solve y2038 problem
11# include "msgbuf_64.h" 11 * - 2 miscellaneous 32-bit values
12# endif 12 *
13 * Pad space on x86-64 is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19#ifdef __i386__
20 unsigned long __unused1;
13#endif 21#endif
22 __kernel_time_t msg_rtime; /* last msgrcv time */
23#ifdef __i386__
24 unsigned long __unused2;
25#endif
26 __kernel_time_t msg_ctime; /* last change time */
27#ifdef __i386__
28 unsigned long __unused3;
29#endif
30 unsigned long msg_cbytes; /* current number of bytes on queue */
31 unsigned long msg_qnum; /* number of messages in queue */
32 unsigned long msg_qbytes; /* max number of bytes on queue */
33 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
34 __kernel_pid_t msg_lrpid; /* last receive pid */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39#endif /* _ASM_X86_MSGBUF_H */
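
The merged msqid64_ds keeps the i386-only __unused pads next to each 32-bit __kernel_time_t, as the comment explains, so that a 64-bit time_t can later occupy the slot without changing the ABI; on x86-64 the time fields are already 64-bit and need no pad. A hedged illustration of the two layouts (these are not the kernel structures, just the relevant fragment of each ABI):

/* Layout illustration only: a 32-bit time field plus its pad on i386
 * occupies the same 8 bytes that the 64-bit field occupies on x86-64. */
struct time_slot_ilp32 {                /* i386 */
        long          msg_stime;        /* 32-bit __kernel_time_t */
        unsigned long __unused1;        /* reserved for the high half */
};

struct time_slot_lp64 {                 /* x86-64 */
        long          msg_stime;        /* 64-bit __kernel_time_t */
};
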
diff --git a/include/asm-x86/msgbuf_32.h b/include/asm-x86/msgbuf_32.h
deleted file mode 100644
index b8d659c157ae..000000000000
--- a/include/asm-x86/msgbuf_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _I386_MSGBUF_H
2#define _I386_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct msqid64_ds {
15 struct ipc64_perm msg_perm;
16 __kernel_time_t msg_stime; /* last msgsnd time */
17 unsigned long __unused1;
18 __kernel_time_t msg_rtime; /* last msgrcv time */
19 unsigned long __unused2;
20 __kernel_time_t msg_ctime; /* last change time */
21 unsigned long __unused3;
22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
25 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
26 __kernel_pid_t msg_lrpid; /* last receive pid */
27 unsigned long __unused4;
28 unsigned long __unused5;
29};
30
31#endif /* _I386_MSGBUF_H */
diff --git a/include/asm-x86/msgbuf_64.h b/include/asm-x86/msgbuf_64.h
deleted file mode 100644
index cd6f95dd54da..000000000000
--- a/include/asm-x86/msgbuf_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _X8664_MSGBUF_H
2#define _X8664_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for x86-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct msqid64_ds {
14 struct ipc64_perm msg_perm;
15 __kernel_time_t msg_stime; /* last msgsnd time */
16 __kernel_time_t msg_rtime; /* last msgrcv time */
17 __kernel_time_t msg_ctime; /* last change time */
18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
21 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
22 __kernel_pid_t msg_lrpid; /* last receive pid */
23 unsigned long __unused4;
24 unsigned long __unused5;
25};
26
27#endif
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2f87ce007002..ba4b31432120 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,13 +1,350 @@
1#ifndef __ASM_X86_MSR_H_
2#define __ASM_X86_MSR_H_
3
4#include <asm/msr-index.h>
5
6#ifdef __i386__
7
1#ifdef __KERNEL__ 8#ifdef __KERNEL__
2# ifdef CONFIG_X86_32 9#ifndef __ASSEMBLY__
3# include "msr_32.h" 10
4# else 11#include <asm/errno.h>
5# include "msr_64.h" 12
6# endif 13static inline unsigned long long native_read_msr(unsigned int msr)
14{
15 unsigned long long val;
16
17 asm volatile("rdmsr" : "=A" (val) : "c" (msr));
18 return val;
19}
20
21static inline unsigned long long native_read_msr_safe(unsigned int msr,
22 int *err)
23{
24 unsigned long long val;
25
26 asm volatile("2: rdmsr ; xorl %0,%0\n"
27 "1:\n\t"
28 ".section .fixup,\"ax\"\n\t"
29 "3: movl %3,%0 ; jmp 1b\n\t"
30 ".previous\n\t"
31 ".section __ex_table,\"a\"\n"
32 " .align 4\n\t"
33 " .long 2b,3b\n\t"
34 ".previous"
35 : "=r" (*err), "=A" (val)
36 : "c" (msr), "i" (-EFAULT));
37
38 return val;
39}
40
41static inline void native_write_msr(unsigned int msr, unsigned long long val)
42{
43 asm volatile("wrmsr" : : "c" (msr), "A"(val));
44}
45
46static inline int native_write_msr_safe(unsigned int msr,
47 unsigned long long val)
48{
49 int err;
50 asm volatile("2: wrmsr ; xorl %0,%0\n"
51 "1:\n\t"
52 ".section .fixup,\"ax\"\n\t"
53 "3: movl %4,%0 ; jmp 1b\n\t"
54 ".previous\n\t"
55 ".section __ex_table,\"a\"\n"
56 " .align 4\n\t"
57 " .long 2b,3b\n\t"
58 ".previous"
59 : "=a" (err)
60 : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
61 "i" (-EFAULT));
62 return err;
63}
64
65static inline unsigned long long native_read_tsc(void)
66{
67 unsigned long long val;
68 asm volatile("rdtsc" : "=A" (val));
69 return val;
70}
71
72static inline unsigned long long native_read_pmc(void)
73{
74 unsigned long long val;
75 asm volatile("rdpmc" : "=A" (val));
76 return val;
77}
78
79#ifdef CONFIG_PARAVIRT
80#include <asm/paravirt.h>
7#else 81#else
8# ifdef __i386__ 82#include <linux/errno.h>
9# include "msr_32.h" 83/*
10# else 84 * Access to machine-specific registers (available on 586 and better only)
11# include "msr_64.h" 85 * Note: the rd* operations modify the parameters directly (without using
12# endif 86 * pointer indirection), this allows gcc to optimize better
87 */
88
89#define rdmsr(msr,val1,val2) \
90 do { \
91 u64 __val = native_read_msr(msr); \
92 (val1) = (u32)__val; \
93 (val2) = (u32)(__val >> 32); \
94 } while(0)
95
96static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
97{
98 native_write_msr(__msr, ((u64)__high << 32) | __low);
99}
100
101#define rdmsrl(msr,val) \
102 ((val) = native_read_msr(msr))
103
104#define wrmsrl(msr,val) native_write_msr(msr, val)
105
106/* wrmsr with exception handling */
107static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
108{
109 return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
110}
111
112/* rdmsr with exception handling */
113#define rdmsr_safe(msr,p1,p2) \
114 ({ \
115 int __err; \
116 u64 __val = native_read_msr_safe(msr, &__err); \
117 (*p1) = (u32)__val; \
118 (*p2) = (u32)(__val >> 32); \
119 __err; \
120 })
121
122#define rdtscl(low) \
123 ((low) = (u32)native_read_tsc())
124
125#define rdtscll(val) \
126 ((val) = native_read_tsc())
127
128#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
129
130#define rdpmc(counter,low,high) \
131 do { \
132 u64 _l = native_read_pmc(); \
133 (low) = (u32)_l; \
134 (high) = (u32)(_l >> 32); \
135 } while(0)
136#endif /* !CONFIG_PARAVIRT */
137
138#ifdef CONFIG_SMP
139void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
140void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
141int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
142int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
143#else /* CONFIG_SMP */
144static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
145{
146 rdmsr(msr_no, *l, *h);
147}
148static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
149{
150 wrmsr(msr_no, l, h);
151}
152static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
153{
154 return rdmsr_safe(msr_no, l, h);
155}
156static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
157{
158 return wrmsr_safe(msr_no, l, h);
159}
160#endif /* CONFIG_SMP */
161#endif /* ! __ASSEMBLY__ */
162#endif /* __KERNEL__ */
163
164#else /* __i386__ */
165
166#ifndef __ASSEMBLY__
167#include <linux/errno.h>
168/*
169 * Access to machine-specific registers (available on 586 and better only)
170 * Note: the rd* operations modify the parameters directly (without using
171 * pointer indirection), this allows gcc to optimize better
172 */
173
174#define rdmsr(msr,val1,val2) \
175 __asm__ __volatile__("rdmsr" \
176 : "=a" (val1), "=d" (val2) \
177 : "c" (msr))
178
179
180#define rdmsrl(msr,val) do { unsigned long a__,b__; \
181 __asm__ __volatile__("rdmsr" \
182 : "=a" (a__), "=d" (b__) \
183 : "c" (msr)); \
184 val = a__ | (b__<<32); \
185} while(0)
186
187#define wrmsr(msr,val1,val2) \
188 __asm__ __volatile__("wrmsr" \
189 : /* no outputs */ \
190 : "c" (msr), "a" (val1), "d" (val2))
191
192#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
193
194/* wrmsr with exception handling */
195#define wrmsr_safe(msr,a,b) ({ int ret__; \
196 asm volatile("2: wrmsr ; xorl %0,%0\n" \
197 "1:\n\t" \
198 ".section .fixup,\"ax\"\n\t" \
199 "3: movl %4,%0 ; jmp 1b\n\t" \
200 ".previous\n\t" \
201 ".section __ex_table,\"a\"\n" \
202 " .align 8\n\t" \
203 " .quad 2b,3b\n\t" \
204 ".previous" \
205 : "=a" (ret__) \
206 : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
207 ret__; })
208
209#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
210
211#define rdmsr_safe(msr,a,b) \
212 ({ int ret__; \
213 asm volatile ("1: rdmsr\n" \
214 "2:\n" \
215 ".section .fixup,\"ax\"\n" \
216 "3: movl %4,%0\n" \
217 " jmp 2b\n" \
218 ".previous\n" \
219 ".section __ex_table,\"a\"\n" \
220 " .align 8\n" \
221 " .quad 1b,3b\n" \
222 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
223 :"c"(msr), "i"(-EIO), "0"(0)); \
224 ret__; })
225
226#define rdtsc(low,high) \
227 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
228
229#define rdtscl(low) \
230 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
231
232#define rdtscp(low,high,aux) \
233 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
234
235#define rdtscll(val) do { \
236 unsigned int __a,__d; \
237 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
238 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
239} while(0)
240
241#define rdtscpll(val, aux) do { \
242 unsigned long __a, __d; \
243 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
244 (val) = (__d << 32) | __a; \
245} while (0)
246
247#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
248
249#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
250
251#define rdpmc(counter,low,high) \
252 __asm__ __volatile__("rdpmc" \
253 : "=a" (low), "=d" (high) \
254 : "c" (counter))
255
256static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
257 unsigned int *ecx, unsigned int *edx)
258{
259 __asm__("cpuid"
260 : "=a" (*eax),
261 "=b" (*ebx),
262 "=c" (*ecx),
263 "=d" (*edx)
264 : "0" (op));
265}
266
267/* Some CPUID calls want 'count' to be placed in ecx */
268static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
269 int *edx)
270{
271 __asm__("cpuid"
272 : "=a" (*eax),
273 "=b" (*ebx),
274 "=c" (*ecx),
275 "=d" (*edx)
276 : "0" (op), "c" (count));
277}
278
279/*
280 * CPUID functions returning a single datum
281 */
282static inline unsigned int cpuid_eax(unsigned int op)
283{
284 unsigned int eax;
285
286 __asm__("cpuid"
287 : "=a" (eax)
288 : "0" (op)
289 : "bx", "cx", "dx");
290 return eax;
291}
292static inline unsigned int cpuid_ebx(unsigned int op)
293{
294 unsigned int eax, ebx;
295
296 __asm__("cpuid"
297 : "=a" (eax), "=b" (ebx)
298 : "0" (op)
299 : "cx", "dx" );
300 return ebx;
301}
302static inline unsigned int cpuid_ecx(unsigned int op)
303{
304 unsigned int eax, ecx;
305
306 __asm__("cpuid"
307 : "=a" (eax), "=c" (ecx)
308 : "0" (op)
309 : "bx", "dx" );
310 return ecx;
311}
312static inline unsigned int cpuid_edx(unsigned int op)
313{
314 unsigned int eax, edx;
315
316 __asm__("cpuid"
317 : "=a" (eax), "=d" (edx)
318 : "0" (op)
319 : "bx", "cx");
320 return edx;
321}
322
323#ifdef CONFIG_SMP
324void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
325void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
326int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
327int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
328#else /* CONFIG_SMP */
329static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
330{
331 rdmsr(msr_no, *l, *h);
332}
333static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
334{
335 wrmsr(msr_no, l, h);
336}
337static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
338{
339 return rdmsr_safe(msr_no, l, h);
340}
341static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
342{
343 return wrmsr_safe(msr_no, l, h);
344}
345#endif /* CONFIG_SMP */
346#endif /* __ASSEMBLY__ */
347
348#endif /* !__i386__ */
349
13#endif
350#endif
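Note on the merged <asm-x86/msr.h>: both branches keep the same calling convention, rdmsr()/rdmsr_safe() write through their arguments while the wrmsr*() variants take the low/high halves by value. A minimal usage sketch (not part of this patch; MSR_IA32_APICBASE comes from <asm/msr-index.h>, which the header already includes):

	u32 lo, hi;

	/* rdmsr_safe() returns non-zero if the RDMSR faulted instead of oopsing. */
	if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi) == 0)
		wrmsr_safe(MSR_IA32_APICBASE, lo, hi);	/* write back unchanged */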
diff --git a/include/asm-x86/msr_32.h b/include/asm-x86/msr_32.h
deleted file mode 100644
index df21ea049369..000000000000
--- a/include/asm-x86/msr_32.h
+++ /dev/null
@@ -1,161 +0,0 @@
1#ifndef __ASM_MSR_H
2#define __ASM_MSR_H
3
4#include <asm/msr-index.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8
9#include <asm/errno.h>
10
11static inline unsigned long long native_read_msr(unsigned int msr)
12{
13 unsigned long long val;
14
15 asm volatile("rdmsr" : "=A" (val) : "c" (msr));
16 return val;
17}
18
19static inline unsigned long long native_read_msr_safe(unsigned int msr,
20 int *err)
21{
22 unsigned long long val;
23
24 asm volatile("2: rdmsr ; xorl %0,%0\n"
25 "1:\n\t"
26 ".section .fixup,\"ax\"\n\t"
27 "3: movl %3,%0 ; jmp 1b\n\t"
28 ".previous\n\t"
29 ".section __ex_table,\"a\"\n"
30 " .align 4\n\t"
31 " .long 2b,3b\n\t"
32 ".previous"
33 : "=r" (*err), "=A" (val)
34 : "c" (msr), "i" (-EFAULT));
35
36 return val;
37}
38
39static inline void native_write_msr(unsigned int msr, unsigned long long val)
40{
41 asm volatile("wrmsr" : : "c" (msr), "A"(val));
42}
43
44static inline int native_write_msr_safe(unsigned int msr,
45 unsigned long long val)
46{
47 int err;
48 asm volatile("2: wrmsr ; xorl %0,%0\n"
49 "1:\n\t"
50 ".section .fixup,\"ax\"\n\t"
51 "3: movl %4,%0 ; jmp 1b\n\t"
52 ".previous\n\t"
53 ".section __ex_table,\"a\"\n"
54 " .align 4\n\t"
55 " .long 2b,3b\n\t"
56 ".previous"
57 : "=a" (err)
58 : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
59 "i" (-EFAULT));
60 return err;
61}
62
63static inline unsigned long long native_read_tsc(void)
64{
65 unsigned long long val;
66 asm volatile("rdtsc" : "=A" (val));
67 return val;
68}
69
70static inline unsigned long long native_read_pmc(void)
71{
72 unsigned long long val;
73 asm volatile("rdpmc" : "=A" (val));
74 return val;
75}
76
77#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h>
79#else
80#include <linux/errno.h>
81/*
82 * Access to machine-specific registers (available on 586 and better only)
83 * Note: the rd* operations modify the parameters directly (without using
84 * pointer indirection), this allows gcc to optimize better
85 */
86
87#define rdmsr(msr,val1,val2) \
88 do { \
89 u64 __val = native_read_msr(msr); \
90 (val1) = (u32)__val; \
91 (val2) = (u32)(__val >> 32); \
92 } while(0)
93
94static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
95{
96 native_write_msr(__msr, ((u64)__high << 32) | __low);
97}
98
99#define rdmsrl(msr,val) \
100 ((val) = native_read_msr(msr))
101
102#define wrmsrl(msr,val) native_write_msr(msr, val)
103
104/* wrmsr with exception handling */
105static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
106{
107 return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
108}
109
110/* rdmsr with exception handling */
111#define rdmsr_safe(msr,p1,p2) \
112 ({ \
113 int __err; \
114 u64 __val = native_read_msr_safe(msr, &__err); \
115 (*p1) = (u32)__val; \
116 (*p2) = (u32)(__val >> 32); \
117 __err; \
118 })
119
120#define rdtscl(low) \
121 ((low) = (u32)native_read_tsc())
122
123#define rdtscll(val) \
124 ((val) = native_read_tsc())
125
126#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
127
128#define rdpmc(counter,low,high) \
129 do { \
130 u64 _l = native_read_pmc(); \
131 (low) = (u32)_l; \
132 (high) = (u32)(_l >> 32); \
133 } while(0)
134#endif /* !CONFIG_PARAVIRT */
135
136#ifdef CONFIG_SMP
137void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
138void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
139int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
140int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
141#else /* CONFIG_SMP */
142static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
143{
144 rdmsr(msr_no, *l, *h);
145}
146static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
147{
148 wrmsr(msr_no, l, h);
149}
150static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
151{
152 return rdmsr_safe(msr_no, l, h);
153}
154static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
155{
156 return wrmsr_safe(msr_no, l, h);
157}
158#endif /* CONFIG_SMP */
159#endif
160#endif
161#endif /* __ASM_MSR_H */
diff --git a/include/asm-x86/msr_64.h b/include/asm-x86/msr_64.h
deleted file mode 100644
index d5c55b80da54..000000000000
--- a/include/asm-x86/msr_64.h
+++ /dev/null
@@ -1,187 +0,0 @@
1#ifndef X86_64_MSR_H
2#define X86_64_MSR_H 1
3
4#include <asm/msr-index.h>
5
6#ifndef __ASSEMBLY__
7#include <linux/errno.h>
8/*
9 * Access to machine-specific registers (available on 586 and better only)
10 * Note: the rd* operations modify the parameters directly (without using
11 * pointer indirection), this allows gcc to optimize better
12 */
13
14#define rdmsr(msr,val1,val2) \
15 __asm__ __volatile__("rdmsr" \
16 : "=a" (val1), "=d" (val2) \
17 : "c" (msr))
18
19
20#define rdmsrl(msr,val) do { unsigned long a__,b__; \
21 __asm__ __volatile__("rdmsr" \
22 : "=a" (a__), "=d" (b__) \
23 : "c" (msr)); \
24 val = a__ | (b__<<32); \
25} while(0)
26
27#define wrmsr(msr,val1,val2) \
28 __asm__ __volatile__("wrmsr" \
29 : /* no outputs */ \
30 : "c" (msr), "a" (val1), "d" (val2))
31
32#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
33
34/* wrmsr with exception handling */
35#define wrmsr_safe(msr,a,b) ({ int ret__; \
36 asm volatile("2: wrmsr ; xorl %0,%0\n" \
37 "1:\n\t" \
38 ".section .fixup,\"ax\"\n\t" \
39 "3: movl %4,%0 ; jmp 1b\n\t" \
40 ".previous\n\t" \
41 ".section __ex_table,\"a\"\n" \
42 " .align 8\n\t" \
43 " .quad 2b,3b\n\t" \
44 ".previous" \
45 : "=a" (ret__) \
46 : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
47 ret__; })
48
49#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
50
51#define rdmsr_safe(msr,a,b) \
52 ({ int ret__; \
53 asm volatile ("1: rdmsr\n" \
54 "2:\n" \
55 ".section .fixup,\"ax\"\n" \
56 "3: movl %4,%0\n" \
57 " jmp 2b\n" \
58 ".previous\n" \
59 ".section __ex_table,\"a\"\n" \
60 " .align 8\n" \
61 " .quad 1b,3b\n" \
62 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
63 :"c"(msr), "i"(-EIO), "0"(0)); \
64 ret__; })
65
66#define rdtsc(low,high) \
67 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
68
69#define rdtscl(low) \
70 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
71
72#define rdtscp(low,high,aux) \
73 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
74
75#define rdtscll(val) do { \
76 unsigned int __a,__d; \
77 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
78 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
79} while(0)
80
81#define rdtscpll(val, aux) do { \
82 unsigned long __a, __d; \
83 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
84 (val) = (__d << 32) | __a; \
85} while (0)
86
87#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
88
89#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
90
91#define rdpmc(counter,low,high) \
92 __asm__ __volatile__("rdpmc" \
93 : "=a" (low), "=d" (high) \
94 : "c" (counter))
95
96static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
97 unsigned int *ecx, unsigned int *edx)
98{
99 __asm__("cpuid"
100 : "=a" (*eax),
101 "=b" (*ebx),
102 "=c" (*ecx),
103 "=d" (*edx)
104 : "0" (op));
105}
106
107/* Some CPUID calls want 'count' to be placed in ecx */
108static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
109 int *edx)
110{
111 __asm__("cpuid"
112 : "=a" (*eax),
113 "=b" (*ebx),
114 "=c" (*ecx),
115 "=d" (*edx)
116 : "0" (op), "c" (count));
117}
118
119/*
120 * CPUID functions returning a single datum
121 */
122static inline unsigned int cpuid_eax(unsigned int op)
123{
124 unsigned int eax;
125
126 __asm__("cpuid"
127 : "=a" (eax)
128 : "0" (op)
129 : "bx", "cx", "dx");
130 return eax;
131}
132static inline unsigned int cpuid_ebx(unsigned int op)
133{
134 unsigned int eax, ebx;
135
136 __asm__("cpuid"
137 : "=a" (eax), "=b" (ebx)
138 : "0" (op)
139 : "cx", "dx" );
140 return ebx;
141}
142static inline unsigned int cpuid_ecx(unsigned int op)
143{
144 unsigned int eax, ecx;
145
146 __asm__("cpuid"
147 : "=a" (eax), "=c" (ecx)
148 : "0" (op)
149 : "bx", "dx" );
150 return ecx;
151}
152static inline unsigned int cpuid_edx(unsigned int op)
153{
154 unsigned int eax, edx;
155
156 __asm__("cpuid"
157 : "=a" (eax), "=d" (edx)
158 : "0" (op)
159 : "bx", "cx");
160 return edx;
161}
162
163#ifdef CONFIG_SMP
164void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
165void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
166int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
167int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
168#else /* CONFIG_SMP */
169static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
170{
171 rdmsr(msr_no, *l, *h);
172}
173static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
174{
175 wrmsr(msr_no, l, h);
176}
177static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
178{
179 return rdmsr_safe(msr_no, l, h);
180}
181static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
182{
183 return wrmsr_safe(msr_no, l, h);
184}
185#endif /* CONFIG_SMP */
186#endif /* __ASSEMBLY__ */
187#endif /* X86_64_MSR_H */
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index 34f633b3e00c..e8320e4e6ca2 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -1,13 +1,164 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _ASM_X86_MTRR_H
24#define _ASM_X86_MTRR_H
25
26#include <linux/ioctl.h>
27#include <linux/errno.h>
28
29#define MTRR_IOCTL_BASE 'M'
30
31struct mtrr_sentry
32{
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38/* Warning: this structure has a different order from i386
39 on x86-64. The 32bit emulation code takes care of that.
40 But you need to use this for 64bit, otherwise your X server
41 will break. */
42
43#ifdef __i386__
44struct mtrr_gentry
45{
46 unsigned int regnum; /* Register number */
47 unsigned long base; /* Base address */
48 unsigned int size; /* Size of region */
49 unsigned int type; /* Type of region */
50};
51
52#else /* __i386__ */
53
54struct mtrr_gentry
55{
56 unsigned long base; /* Base address */
57 unsigned int size; /* Size of region */
58 unsigned int regnum; /* Register number */
59 unsigned int type; /* Type of region */
60};
61#endif /* !__i386__ */
62
63/* These are the various ioctls */
64#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
65#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
66#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
67#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
68#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
69#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
70#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
71#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
72#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
73#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
74
75/* These are the region types */
76#define MTRR_TYPE_UNCACHABLE 0
77#define MTRR_TYPE_WRCOMB 1
78/*#define MTRR_TYPE_ 2*/
79/*#define MTRR_TYPE_ 3*/
80#define MTRR_TYPE_WRTHROUGH 4
81#define MTRR_TYPE_WRPROT 5
82#define MTRR_TYPE_WRBACK 6
83#define MTRR_NUM_TYPES 7
84
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "mtrr_32.h"
4# else
5# include "mtrr_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "mtrr_32.h"
10# else
11# include "mtrr_64.h"
12# endif
13#endif
85#ifdef __KERNEL__
86
87/* The following functions are for use by other drivers */
88# ifdef CONFIG_MTRR
89extern void mtrr_save_fixed_ranges(void *);
90extern void mtrr_save_state(void);
91extern int mtrr_add (unsigned long base, unsigned long size,
92 unsigned int type, char increment);
93extern int mtrr_add_page (unsigned long base, unsigned long size,
94 unsigned int type, char increment);
95extern int mtrr_del (int reg, unsigned long base, unsigned long size);
96extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
97extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
98extern void mtrr_ap_init(void);
99extern void mtrr_bp_init(void);
100# else
101#define mtrr_save_fixed_ranges(arg) do {} while (0)
102#define mtrr_save_state() do {} while (0)
103static __inline__ int mtrr_add (unsigned long base, unsigned long size,
104 unsigned int type, char increment)
105{
106 return -ENODEV;
107}
108static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
109 unsigned int type, char increment)
110{
111 return -ENODEV;
112}
113static __inline__ int mtrr_del (int reg, unsigned long base,
114 unsigned long size)
115{
116 return -ENODEV;
117}
118static __inline__ int mtrr_del_page (int reg, unsigned long base,
119 unsigned long size)
120{
121 return -ENODEV;
122}
123
124static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
125
126#define mtrr_ap_init() do {} while (0)
127#define mtrr_bp_init() do {} while (0)
128# endif
129
130#ifdef CONFIG_COMPAT
131#include <linux/compat.h>
132
133struct mtrr_sentry32
134{
135 compat_ulong_t base; /* Base address */
136 compat_uint_t size; /* Size of region */
137 compat_uint_t type; /* Type of region */
138};
139
140struct mtrr_gentry32
141{
142 compat_ulong_t regnum; /* Register number */
143 compat_uint_t base; /* Base address */
144 compat_uint_t size; /* Size of region */
145 compat_uint_t type; /* Type of region */
146};
147
148#define MTRR_IOCTL_BASE 'M'
149
150#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
151#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
152#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
153#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
154#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
155#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
156#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
157#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
158#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
159#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
160#endif /* CONFIG_COMPAT */
161
162#endif /* __KERNEL__ */
163
164#endif /* _ASM_X86_MTRR_H */
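Usage sketch for the driver-facing API that both sides of the #ifdef now share: mtrr_add() returns the allocated register number (or a negative errno) and mtrr_del() takes that number back. The base/size values below are made up for illustration, not taken from this patch:

	int reg;

	/* Mark a hypothetical 16 MB frame buffer as write-combining. */
	reg = mtrr_add(0xd0000000, 0x1000000, MTRR_TYPE_WRCOMB, 1);
	if (reg >= 0)
		mtrr_del(reg, 0xd0000000, 0x1000000);	/* undo on teardown */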
diff --git a/include/asm-x86/mtrr_32.h b/include/asm-x86/mtrr_32.h
deleted file mode 100644
index 7e9c7ccbdcfe..000000000000
--- a/include/asm-x86/mtrr_32.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H
25
26#include <linux/ioctl.h>
27#include <linux/errno.h>
28
29#define MTRR_IOCTL_BASE 'M'
30
31struct mtrr_sentry
32{
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38struct mtrr_gentry
39{
40 unsigned int regnum; /* Register number */
41 unsigned long base; /* Base address */
42 unsigned int size; /* Size of region */
43 unsigned int type; /* Type of region */
44};
45
46/* These are the various ioctls */
47#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
48#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
49#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
50#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
51#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
52#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
53#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
54#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
55#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
56#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
57
58/* These are the region types */
59#define MTRR_TYPE_UNCACHABLE 0
60#define MTRR_TYPE_WRCOMB 1
61/*#define MTRR_TYPE_ 2*/
62/*#define MTRR_TYPE_ 3*/
63#define MTRR_TYPE_WRTHROUGH 4
64#define MTRR_TYPE_WRPROT 5
65#define MTRR_TYPE_WRBACK 6
66#define MTRR_NUM_TYPES 7
67
68#ifdef __KERNEL__
69
70/* The following functions are for use by other drivers */
71# ifdef CONFIG_MTRR
72extern void mtrr_save_fixed_ranges(void *);
73extern void mtrr_save_state(void);
74extern int mtrr_add (unsigned long base, unsigned long size,
75 unsigned int type, char increment);
76extern int mtrr_add_page (unsigned long base, unsigned long size,
77 unsigned int type, char increment);
78extern int mtrr_del (int reg, unsigned long base, unsigned long size);
79extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
80extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
81extern void mtrr_ap_init(void);
82extern void mtrr_bp_init(void);
83# else
84#define mtrr_save_fixed_ranges(arg) do {} while (0)
85#define mtrr_save_state() do {} while (0)
86static __inline__ int mtrr_add (unsigned long base, unsigned long size,
87 unsigned int type, char increment)
88{
89 return -ENODEV;
90}
91static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
92 unsigned int type, char increment)
93{
94 return -ENODEV;
95}
96static __inline__ int mtrr_del (int reg, unsigned long base,
97 unsigned long size)
98{
99 return -ENODEV;
100}
101static __inline__ int mtrr_del_page (int reg, unsigned long base,
102 unsigned long size)
103{
104 return -ENODEV;
105}
106
107static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
108
109#define mtrr_ap_init() do {} while (0)
110#define mtrr_bp_init() do {} while (0)
111# endif
112
113#endif
114
115#endif /* _LINUX_MTRR_H */
diff --git a/include/asm-x86/mtrr_64.h b/include/asm-x86/mtrr_64.h
deleted file mode 100644
index b557c486bef8..000000000000
--- a/include/asm-x86/mtrr_64.h
+++ /dev/null
@@ -1,152 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _LINUX_MTRR_H
24#define _LINUX_MTRR_H
25
26#include <linux/ioctl.h>
27
28#define MTRR_IOCTL_BASE 'M'
29
30struct mtrr_sentry
31{
32 unsigned long base; /* Base address */
33 unsigned int size; /* Size of region */
34 unsigned int type; /* Type of region */
35};
36
37/* Warning: this structure has a different order from i386
38 on x86-64. The 32bit emulation code takes care of that.
39 But you need to use this for 64bit, otherwise your X server
40 will break. */
41struct mtrr_gentry
42{
43 unsigned long base; /* Base address */
44 unsigned int size; /* Size of region */
45 unsigned int regnum; /* Register number */
46 unsigned int type; /* Type of region */
47};
48
49/* These are the various ioctls */
50#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
51#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
52#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
53#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
54#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
55#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
56#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
57#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
58#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
59#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
60
61/* These are the region types */
62#define MTRR_TYPE_UNCACHABLE 0
63#define MTRR_TYPE_WRCOMB 1
64/*#define MTRR_TYPE_ 2*/
65/*#define MTRR_TYPE_ 3*/
66#define MTRR_TYPE_WRTHROUGH 4
67#define MTRR_TYPE_WRPROT 5
68#define MTRR_TYPE_WRBACK 6
69#define MTRR_NUM_TYPES 7
70
71#ifdef __KERNEL__
72
73/* The following functions are for use by other drivers */
74# ifdef CONFIG_MTRR
75extern int mtrr_add (unsigned long base, unsigned long size,
76 unsigned int type, char increment);
77extern int mtrr_add_page (unsigned long base, unsigned long size,
78 unsigned int type, char increment);
79extern int mtrr_del (int reg, unsigned long base, unsigned long size);
80extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
81# else
82static __inline__ int mtrr_add (unsigned long base, unsigned long size,
83 unsigned int type, char increment)
84{
85 return -ENODEV;
86}
87static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
88 unsigned int type, char increment)
89{
90 return -ENODEV;
91}
92static __inline__ int mtrr_del (int reg, unsigned long base,
93 unsigned long size)
94{
95 return -ENODEV;
96}
97static __inline__ int mtrr_del_page (int reg, unsigned long base,
98 unsigned long size)
99{
100 return -ENODEV;
101}
102
103#endif /* CONFIG_MTRR */
104
105#ifdef CONFIG_COMPAT
106#include <linux/compat.h>
107
108struct mtrr_sentry32
109{
110 compat_ulong_t base; /* Base address */
111 compat_uint_t size; /* Size of region */
112 compat_uint_t type; /* Type of region */
113};
114
115struct mtrr_gentry32
116{
117 compat_ulong_t regnum; /* Register number */
118 compat_uint_t base; /* Base address */
119 compat_uint_t size; /* Size of region */
120 compat_uint_t type; /* Type of region */
121};
122
123#define MTRR_IOCTL_BASE 'M'
124
125#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
126#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
127#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
128#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
129#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
130#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
131#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
132#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
133#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
134#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
135
136#endif /* CONFIG_COMPAT */
137
138#ifdef CONFIG_MTRR
139extern void mtrr_ap_init(void);
140extern void mtrr_bp_init(void);
141extern void mtrr_save_fixed_ranges(void *);
142extern void mtrr_save_state(void);
143#else
144#define mtrr_ap_init() do {} while (0)
145#define mtrr_bp_init() do {} while (0)
146#define mtrr_save_fixed_ranges(arg) do {} while (0)
147#define mtrr_save_state() do {} while (0)
148#endif
149
150#endif /* __KERNEL__ */
151
152#endif /* _LINUX_MTRR_H */
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index bc4d64a87689..213c97300cb3 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -1,13 +1,142 @@
1#ifndef _ASM_X86_PTRACE_H
2#define _ASM_X86_PTRACE_H
3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6
7#ifndef __ASSEMBLY__
8
9#ifdef __i386__
10/* this struct defines the way the registers are stored on the
11 stack during a system call. */
12
13struct pt_regs {
14 long ebx;
15 long ecx;
16 long edx;
17 long esi;
18 long edi;
19 long ebp;
20 long eax;
21 int xds;
22 int xes;
23 int xfs;
24 /* int xgs; */
25 long orig_eax;
26 long eip;
27 int xcs;
28 long eflags;
29 long esp;
30 int xss;
31};
32
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "ptrace_32.h"
4# else
5# include "ptrace_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "ptrace_32.h"
10# else
11# include "ptrace_64.h"
12# endif
33#ifdef __KERNEL__
34
35#include <asm/vm86.h>
36#include <asm/segment.h>
37
38struct task_struct;
39extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
40
41/*
42 * user_mode_vm(regs) determines whether a register set came from user mode.
43 * This is true if V8086 mode was enabled OR if the register set was from
44 * protected mode with RPL-3 CS value. This tricky test checks that with
45 * one comparison. Many places in the kernel can bypass this full check
46 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
47 */
48static inline int user_mode(struct pt_regs *regs)
49{
50 return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
51}
52static inline int user_mode_vm(struct pt_regs *regs)
53{
54 return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
55}
56static inline int v8086_mode(struct pt_regs *regs)
57{
58 return (regs->eflags & VM_MASK);
59}
60
61#define instruction_pointer(regs) ((regs)->eip)
62#define frame_pointer(regs) ((regs)->ebp)
63#define stack_pointer(regs) ((regs)->esp)
64#define regs_return_value(regs) ((regs)->eax)
65
66extern unsigned long profile_pc(struct pt_regs *regs);
67#endif /* __KERNEL__ */
68
69#else /* __i386__ */
70
71struct pt_regs {
72 unsigned long r15;
73 unsigned long r14;
74 unsigned long r13;
75 unsigned long r12;
76 unsigned long rbp;
77 unsigned long rbx;
78/* arguments: non interrupts/non tracing syscalls only save upto here*/
79 unsigned long r11;
80 unsigned long r10;
81 unsigned long r9;
82 unsigned long r8;
83 unsigned long rax;
84 unsigned long rcx;
85 unsigned long rdx;
86 unsigned long rsi;
87 unsigned long rdi;
88 unsigned long orig_rax;
89/* end of arguments */
90/* cpu exception frame or undefined */
91 unsigned long rip;
92 unsigned long cs;
93 unsigned long eflags;
94 unsigned long rsp;
95 unsigned long ss;
96/* top of stack page */
97};
98
99#ifdef __KERNEL__
100
101#define user_mode(regs) (!!((regs)->cs & 3))
102#define user_mode_vm(regs) user_mode(regs)
103#define instruction_pointer(regs) ((regs)->rip)
104#define frame_pointer(regs) ((regs)->rbp)
105#define stack_pointer(regs) ((regs)->rsp)
106#define regs_return_value(regs) ((regs)->rax)
107
108extern unsigned long profile_pc(struct pt_regs *regs);
109void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
110
111struct task_struct;
112
113extern unsigned long
114convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
115
116enum {
117 EF_CF = 0x00000001,
118 EF_PF = 0x00000004,
119 EF_AF = 0x00000010,
120 EF_ZF = 0x00000040,
121 EF_SF = 0x00000080,
122 EF_TF = 0x00000100,
123 EF_IE = 0x00000200,
124 EF_DF = 0x00000400,
125 EF_OF = 0x00000800,
126 EF_IOPL = 0x00003000,
127 EF_IOPL_RING0 = 0x00000000,
128 EF_IOPL_RING1 = 0x00001000,
129 EF_IOPL_RING2 = 0x00002000,
130 EF_NT = 0x00004000, /* nested task */
131 EF_RF = 0x00010000, /* resume */
132 EF_VM = 0x00020000, /* virtual mode */
133 EF_AC = 0x00040000, /* alignment */
134 EF_VIF = 0x00080000, /* virtual interrupt */
135 EF_VIP = 0x00100000, /* virtual interrupt pending */
136 EF_ID = 0x00200000, /* id */
137};
138#endif /* __KERNEL__ */
139#endif /* !__i386__ */
140#endif /* !__ASSEMBLY__ */
141
13#endif
142#endif
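The accessors kept above (user_mode(), instruction_pointer(), stack_pointer(), ...) are what let most fault and trace code stay identical across the two register layouts. A hedged sketch of the typical caller pattern, with printk() standing in for whatever the real code does:

	if (user_mode(regs))
		printk(KERN_DEBUG "user fault at %lx, sp %lx\n",
		       instruction_pointer(regs), stack_pointer(regs));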
diff --git a/include/asm-x86/ptrace_32.h b/include/asm-x86/ptrace_32.h
deleted file mode 100644
index 78d063dabe0a..000000000000
--- a/include/asm-x86/ptrace_32.h
+++ /dev/null
@@ -1,65 +0,0 @@
1#ifndef _I386_PTRACE_H
2#define _I386_PTRACE_H
3
4#include <asm/ptrace-abi.h>
5
6/* this struct defines the way the registers are stored on the
7 stack during a system call. */
8
9struct pt_regs {
10 long ebx;
11 long ecx;
12 long edx;
13 long esi;
14 long edi;
15 long ebp;
16 long eax;
17 int xds;
18 int xes;
19 int xfs;
20 /* int xgs; */
21 long orig_eax;
22 long eip;
23 int xcs;
24 long eflags;
25 long esp;
26 int xss;
27};
28
29#ifdef __KERNEL__
30
31#include <asm/vm86.h>
32#include <asm/segment.h>
33
34struct task_struct;
35extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
36
37/*
38 * user_mode_vm(regs) determines whether a register set came from user mode.
39 * This is true if V8086 mode was enabled OR if the register set was from
40 * protected mode with RPL-3 CS value. This tricky test checks that with
41 * one comparison. Many places in the kernel can bypass this full check
42 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
43 */
44static inline int user_mode(struct pt_regs *regs)
45{
46 return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
47}
48static inline int user_mode_vm(struct pt_regs *regs)
49{
50 return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
51}
52static inline int v8086_mode(struct pt_regs *regs)
53{
54 return (regs->eflags & VM_MASK);
55}
56
57#define instruction_pointer(regs) ((regs)->eip)
58#define frame_pointer(regs) ((regs)->ebp)
59#define stack_pointer(regs) ((regs)->esp)
60#define regs_return_value(regs) ((regs)->eax)
61
62extern unsigned long profile_pc(struct pt_regs *regs);
63#endif /* __KERNEL__ */
64
65#endif
diff --git a/include/asm-x86/ptrace_64.h b/include/asm-x86/ptrace_64.h
deleted file mode 100644
index 7bfe61e1b705..000000000000
--- a/include/asm-x86/ptrace_64.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef _X86_64_PTRACE_H
2#define _X86_64_PTRACE_H
3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6
7#ifndef __ASSEMBLY__
8
9struct pt_regs {
10 unsigned long r15;
11 unsigned long r14;
12 unsigned long r13;
13 unsigned long r12;
14 unsigned long rbp;
15 unsigned long rbx;
16/* arguments: non interrupts/non tracing syscalls only save upto here*/
17 unsigned long r11;
18 unsigned long r10;
19 unsigned long r9;
20 unsigned long r8;
21 unsigned long rax;
22 unsigned long rcx;
23 unsigned long rdx;
24 unsigned long rsi;
25 unsigned long rdi;
26 unsigned long orig_rax;
27/* end of arguments */
28/* cpu exception frame or undefined */
29 unsigned long rip;
30 unsigned long cs;
31 unsigned long eflags;
32 unsigned long rsp;
33 unsigned long ss;
34/* top of stack page */
35};
36
37#endif
38
39#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
40#define user_mode(regs) (!!((regs)->cs & 3))
41#define user_mode_vm(regs) user_mode(regs)
42#define instruction_pointer(regs) ((regs)->rip)
43#define frame_pointer(regs) ((regs)->rbp)
44#define stack_pointer(regs) ((regs)->rsp)
45#define regs_return_value(regs) ((regs)->rax)
46
47extern unsigned long profile_pc(struct pt_regs *regs);
48void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
49
50struct task_struct;
51
52extern unsigned long
53convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
54
55enum {
56 EF_CF = 0x00000001,
57 EF_PF = 0x00000004,
58 EF_AF = 0x00000010,
59 EF_ZF = 0x00000040,
60 EF_SF = 0x00000080,
61 EF_TF = 0x00000100,
62 EF_IE = 0x00000200,
63 EF_DF = 0x00000400,
64 EF_OF = 0x00000800,
65 EF_IOPL = 0x00003000,
66 EF_IOPL_RING0 = 0x00000000,
67 EF_IOPL_RING1 = 0x00001000,
68 EF_IOPL_RING2 = 0x00002000,
69 EF_NT = 0x00004000, /* nested task */
70 EF_RF = 0x00010000, /* resume */
71 EF_VM = 0x00020000, /* virtual mode */
72 EF_AC = 0x00040000, /* alignment */
73 EF_VIF = 0x00080000, /* virtual interrupt */
74 EF_VIP = 0x00100000, /* virtual interrupt pending */
75 EF_ID = 0x00200000, /* id */
76};
77
78#endif
79
80#endif
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 8b64f3ea2b78..7400d3ad75c6 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -1,5 +1,72 @@
1#ifdef CONFIG_X86_32
2# include "required-features_32.h"
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Make sure to assign features to the proper mask!
7
8 Some requirements that are not in CPUID yet are also in the
9 CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
10
11 The real information is in arch/x86/Kconfig.cpu, this just converts
12 the CONFIGs into a bitmask */
13
14#ifndef CONFIG_MATH_EMULATION
15# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
3#else
4# include "required-features_64.h"
16#else
17# define NEED_FPU 0
18#endif
19
20#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
22# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
23#else
24# define NEED_PAE 0
25# define NEED_CX8 0
26#endif
27
28#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
29# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
30#else
31# define NEED_CMOV 0
32#endif
33
34#ifdef CONFIG_X86_USE_3DNOW
35# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
36#else
37# define NEED_3DNOW 0
38#endif
39
40#ifdef CONFIG_X86_64
41#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
42#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
43#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
44#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
45#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
46#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
47#define NEED_LM (1<<(X86_FEATURE_LM & 31))
48#else
49#define NEED_PSE 0
50#define NEED_MSR 0
51#define NEED_PGE 0
52#define NEED_FXSR 0
53#define NEED_XMM 0
54#define NEED_XMM2 0
55#define NEED_LM 0
56#endif
57
58#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
59 NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
60 NEED_XMM|NEED_XMM2)
61#define SSE_MASK (NEED_XMM|NEED_XMM2)
62
63#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
64
65#define REQUIRED_MASK2 0
66#define REQUIRED_MASK3 0
67#define REQUIRED_MASK4 0
68#define REQUIRED_MASK5 0
69#define REQUIRED_MASK6 0
70#define REQUIRED_MASK7 0
71
5#endif
72#endif
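For context, these masks are consumed very early in boot: the CPUID word matching each mask is ANDed against REQUIRED_MASKn and the kernel refuses to run if a required bit is missing. A simplified sketch of the idea (the real check lives in the boot/verify_cpu code and is not written like this):

	u32 eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);	/* mask word 0 corresponds to CPUID leaf 1, EDX */
	if ((edx & REQUIRED_MASK0) != REQUIRED_MASK0)
		panic("CPU is missing required features");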
diff --git a/include/asm-x86/required-features_32.h b/include/asm-x86/required-features_32.h
deleted file mode 100644
index 618feb98f9f5..000000000000
--- a/include/asm-x86/required-features_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Make sure to assign features to the proper mask!
7
8 Some requirements that are not in CPUID yet are also in the
9 CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
10
11 The real information is in arch/i386/Kconfig.cpu, this just converts
12 the CONFIGs into a bitmask */
13
14#ifndef CONFIG_MATH_EMULATION
15# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
16#else
17# define NEED_FPU 0
18#endif
19
20#ifdef CONFIG_X86_PAE
21# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
22#else
23# define NEED_PAE 0
24#endif
25
26#ifdef CONFIG_X86_CMOV
27# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
28#else
29# define NEED_CMOV 0
30#endif
31
32#ifdef CONFIG_X86_PAE
33# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
34#else
35# define NEED_CX8 0
36#endif
37
38#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
39
40#ifdef CONFIG_X86_USE_3DNOW
41# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
42#else
43# define NEED_3DNOW 0
44#endif
45
46#define REQUIRED_MASK1 (NEED_3DNOW)
47
48#define REQUIRED_MASK2 0
49#define REQUIRED_MASK3 0
50#define REQUIRED_MASK4 0
51#define REQUIRED_MASK5 0
52#define REQUIRED_MASK6 0
53#define REQUIRED_MASK7 0
54
55#endif
diff --git a/include/asm-x86/required-features_64.h b/include/asm-x86/required-features_64.h
deleted file mode 100644
index e80d5761b00a..000000000000
--- a/include/asm-x86/required-features_64.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef _ASM_REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1
3
4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the
6 kernel dies. Make sure to assign features to the proper mask!
7
8 The real information is in arch/x86_64/Kconfig.cpu, this just converts
9 the CONFIGs into a bitmask */
10
11/* x86-64 baseline features */
12#define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
13#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
14#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
15#define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
16#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
17#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
18#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
19#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
20#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
21#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
22
23#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
24 NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
25 NEED_XMM|NEED_XMM2)
26#define SSE_MASK (NEED_XMM|NEED_XMM2)
27
28/* x86-64 baseline features */
29#define NEED_LM (1<<(X86_FEATURE_LM & 31))
30
31#ifdef CONFIG_X86_USE_3DNOW
32# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
33#else
34# define NEED_3DNOW 0
35#endif
36
37#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
38
39#define REQUIRED_MASK2 0
40#define REQUIRED_MASK3 0
41#define REQUIRED_MASK4 0
42#define REQUIRED_MASK5 0
43#define REQUIRED_MASK6 0
44#define REQUIRED_MASK7 0
45
46#endif
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index 81c0d98bb1c8..24d786e07b49 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -1,13 +1,63 @@
1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H
3
4#define COMMAND_LINE_SIZE 2048
5
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "setup_32.h"
4# else
5# include "setup_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "setup_32.h"
10# else
11# include "setup_64.h"
12# endif
6#ifdef __KERNEL__
7
8#ifdef __i386__
9
10#include <linux/pfn.h>
11/*
12 * Reserved space for vmalloc and iomap - defined in asm/page.h
13 */
14#define MAXMEM_PFN PFN_DOWN(MAXMEM)
15#define MAX_NONPAE_PFN (1 << 20)
16
17#endif /* __i386__ */
18
19#define PARAM_SIZE 4096 /* sizeof(struct boot_params) */
20
21#define OLD_CL_MAGIC 0xA33F
22#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
23#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
24
25#ifndef __ASSEMBLY__
26#include <asm/bootparam.h>
27
28#ifndef _SETUP
29
30/*
31 * This is set up by the setup-routine at boot-time
32 */
33extern struct boot_params boot_params;
34
35#ifdef __i386__
36/*
37 * Do NOT EVER look at the BIOS memory size location.
38 * It does not work on many machines.
39 */
40#define LOWMEMSIZE() (0x9f000)
41
42struct e820entry;
43
44char * __init machine_specific_memory_setup(void);
45char *memory_setup(void);
46
47int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
48int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
49void __init add_memory_region(unsigned long long start,
50 unsigned long long size, int type);
51
52extern unsigned long init_pg_tables_end;
53
54#ifndef CONFIG_PARAVIRT
55#define paravirt_post_allocator_init() do {} while (0)
13#endif
56#endif
57
58#endif /* __i386__ */
59#endif /* _SETUP */
60#endif /* __ASSEMBLY__ */
61#endif /* __KERNEL__ */
62
63#endif /* _ASM_X86_SETUP_H */
diff --git a/include/asm-x86/setup_32.h b/include/asm-x86/setup_32.h
deleted file mode 100644
index 7a57ca8a1793..000000000000
--- a/include/asm-x86/setup_32.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Just a place holder. We don't want to have to test x86 before
3 * we include stuff
4 */
5
6#ifndef _i386_SETUP_H
7#define _i386_SETUP_H
8
9#define COMMAND_LINE_SIZE 2048
10
11#ifdef __KERNEL__
12#include <linux/pfn.h>
13
14/*
15 * Reserved space for vmalloc and iomap - defined in asm/page.h
16 */
17#define MAXMEM_PFN PFN_DOWN(MAXMEM)
18#define MAX_NONPAE_PFN (1 << 20)
19
20#define PARAM_SIZE 4096
21
22#define OLD_CL_MAGIC_ADDR 0x90020
23#define OLD_CL_MAGIC 0xA33F
24#define OLD_CL_BASE_ADDR 0x90000
25#define OLD_CL_OFFSET 0x90022
26#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
27
28#ifndef __ASSEMBLY__
29
30#include <asm/bootparam.h>
31
32/*
33 * This is set up by the setup-routine at boot-time
34 */
35extern struct boot_params boot_params;
36
37/*
38 * Do NOT EVER look at the BIOS memory size location.
39 * It does not work on many machines.
40 */
41#define LOWMEMSIZE() (0x9f000)
42
43struct e820entry;
44
45char * __init machine_specific_memory_setup(void);
46char *memory_setup(void);
47
48int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
49int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
50void __init add_memory_region(unsigned long long start,
51 unsigned long long size, int type);
52
53extern unsigned long init_pg_tables_end;
54
55#ifndef CONFIG_PARAVIRT
56#define paravirt_post_allocator_init() do {} while (0)
57#endif
58
59#endif /* __ASSEMBLY__ */
60
61#endif /* __KERNEL__ */
62
63#endif /* _i386_SETUP_H */
diff --git a/include/asm-x86/setup_64.h b/include/asm-x86/setup_64.h
deleted file mode 100644
index a04aadcccf67..000000000000
--- a/include/asm-x86/setup_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _x8664_SETUP_H
2#define _x8664_SETUP_H
3
4#define COMMAND_LINE_SIZE 2048
5
6#ifdef __KERNEL__
7
8#ifndef __ASSEMBLY__
9#include <asm/bootparam.h>
10
11/*
12 * This is set up by the setup-routine at boot-time
13 */
14extern struct boot_params boot_params;
15
16#endif /* not __ASSEMBLY__ */
17#endif /* __KERNEL__ */
18
19#endif
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h
index e85f1cb11217..b51413b74971 100644
--- a/include/asm-x86/shmbuf.h
+++ b/include/asm-x86/shmbuf.h
@@ -1,13 +1,51 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "shmbuf_32.h"
4# else
5# include "shmbuf_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "shmbuf_32.h"
10# else
11# include "shmbuf_64.h"
12# endif
1#ifndef _ASM_X86_SHMBUF_H
2#define _ASM_X86_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on 32 bit is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on 64 bit is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16
17struct shmid64_ds {
18 struct ipc64_perm shm_perm; /* operation perms */
19 size_t shm_segsz; /* size of segment (bytes) */
20 __kernel_time_t shm_atime; /* last attach time */
21#ifdef __i386__
22 unsigned long __unused1;
13#endif
23#endif
24 __kernel_time_t shm_dtime; /* last detach time */
25#ifdef __i386__
26 unsigned long __unused2;
27#endif
28 __kernel_time_t shm_ctime; /* last change time */
29#ifdef __i386__
30 unsigned long __unused3;
31#endif
32 __kernel_pid_t shm_cpid; /* pid of creator */
33 __kernel_pid_t shm_lpid; /* pid of last operator */
34 unsigned long shm_nattch; /* no. of current attaches */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39struct shminfo64 {
40 unsigned long shmmax;
41 unsigned long shmmin;
42 unsigned long shmmni;
43 unsigned long shmseg;
44 unsigned long shmall;
45 unsigned long __unused1;
46 unsigned long __unused2;
47 unsigned long __unused3;
48 unsigned long __unused4;
49};
50
51#endif /* _ASM_X86_SHMBUF_H */
diff --git a/include/asm-x86/shmbuf_32.h b/include/asm-x86/shmbuf_32.h
deleted file mode 100644
index d1cdc3cb079b..000000000000
--- a/include/asm-x86/shmbuf_32.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _I386_SHMBUF_H
2#define _I386_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13
14struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */
16 size_t shm_segsz; /* size of segment (bytes) */
17 __kernel_time_t shm_atime; /* last attach time */
18 unsigned long __unused1;
19 __kernel_time_t shm_dtime; /* last detach time */
20 unsigned long __unused2;
21 __kernel_time_t shm_ctime; /* last change time */
22 unsigned long __unused3;
23 __kernel_pid_t shm_cpid; /* pid of creator */
24 __kernel_pid_t shm_lpid; /* pid of last operator */
25 unsigned long shm_nattch; /* no. of current attaches */
26 unsigned long __unused4;
27 unsigned long __unused5;
28};
29
30struct shminfo64 {
31 unsigned long shmmax;
32 unsigned long shmmin;
33 unsigned long shmmni;
34 unsigned long shmseg;
35 unsigned long shmall;
36 unsigned long __unused1;
37 unsigned long __unused2;
38 unsigned long __unused3;
39 unsigned long __unused4;
40};
41
42#endif /* _I386_SHMBUF_H */
diff --git a/include/asm-x86/shmbuf_64.h b/include/asm-x86/shmbuf_64.h
deleted file mode 100644
index 5a6d6dda7c48..000000000000
--- a/include/asm-x86/shmbuf_64.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef _X8664_SHMBUF_H
2#define _X8664_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x8664 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct shmid64_ds {
14 struct ipc64_perm shm_perm; /* operation perms */
15 size_t shm_segsz; /* size of segment (bytes) */
16 __kernel_time_t shm_atime; /* last attach time */
17 __kernel_time_t shm_dtime; /* last detach time */
18 __kernel_time_t shm_ctime; /* last change time */
19 __kernel_pid_t shm_cpid; /* pid of creator */
20 __kernel_pid_t shm_lpid; /* pid of last operator */
21 unsigned long shm_nattch; /* no. of current attaches */
22 unsigned long __unused4;
23 unsigned long __unused5;
24};
25
26struct shminfo64 {
27 unsigned long shmmax;
28 unsigned long shmmin;
29 unsigned long shmmni;
30 unsigned long shmseg;
31 unsigned long shmall;
32 unsigned long __unused1;
33 unsigned long __unused2;
34 unsigned long __unused3;
35 unsigned long __unused4;
36};
37
38#endif
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 0d16ceff1599..c047f9dc3423 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -1,13 +1,138 @@
1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32
3# include "sigcontext_32.h"
4# else
5# include "sigcontext_64.h"
6# endif
7#else
8# ifdef __i386__
9# include "sigcontext_32.h"
10# else
11# include "sigcontext_64.h"
12# endif
1#ifndef _ASM_X86_SIGCONTEXT_H
2#define _ASM_X86_SIGCONTEXT_H
3
4#include <linux/compiler.h>
5#include <asm/types.h>
6
7#ifdef __i386__
8/*
9 * As documented in the iBCS2 standard..
10 *
11 * The first part of "struct _fpstate" is just the normal i387
12 * hardware setup, the extra "status" word is used to save the
13 * coprocessor status word before entering the handler.
14 *
15 * Pentium III FXSR, SSE support
16 * Gareth Hughes <gareth@valinux.com>, May 2000
17 *
18 * The FPU state data structure has had to grow to accommodate the
19 * extended FPU state required by the Streaming SIMD Extensions.
20 * There is no documented standard to accomplish this at the moment.
21 */
22struct _fpreg {
23 unsigned short significand[4];
24 unsigned short exponent;
25};
26
27struct _fpxreg {
28 unsigned short significand[4];
29 unsigned short exponent;
30 unsigned short padding[3];
31};
32
33struct _xmmreg {
34 unsigned long element[4];
35};
36
37struct _fpstate {
38 /* Regular FPU environment */
39 unsigned long cw;
40 unsigned long sw;
41 unsigned long tag;
42 unsigned long ipoff;
43 unsigned long cssel;
44 unsigned long dataoff;
45 unsigned long datasel;
46 struct _fpreg _st[8];
47 unsigned short status;
48 unsigned short magic; /* 0xffff = regular FPU data only */
49
50 /* FXSR FPU environment */
51 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
52 unsigned long mxcsr;
53 unsigned long reserved;
54 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
55 struct _xmmreg _xmm[8];
56 unsigned long padding[56];
57};
58
59#define X86_FXSR_MAGIC 0x0000
60
61struct sigcontext {
62 unsigned short gs, __gsh;
63 unsigned short fs, __fsh;
64 unsigned short es, __esh;
65 unsigned short ds, __dsh;
66 unsigned long edi;
67 unsigned long esi;
68 unsigned long ebp;
69 unsigned long esp;
70 unsigned long ebx;
71 unsigned long edx;
72 unsigned long ecx;
73 unsigned long eax;
74 unsigned long trapno;
75 unsigned long err;
76 unsigned long eip;
77 unsigned short cs, __csh;
78 unsigned long eflags;
79 unsigned long esp_at_signal;
80 unsigned short ss, __ssh;
81 struct _fpstate __user * fpstate;
82 unsigned long oldmask;
83 unsigned long cr2;
84};
85
86#else /* __i386__ */
87
88/* FXSAVE frame */
89/* Note: reserved1/2 may someday contain valuable data. Always save/restore
90 them when you change signal frames. */
91struct _fpstate {
92 __u16 cwd;
93 __u16 swd;
94 __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
95 __u16 fop;
96 __u64 rip;
97 __u64 rdp;
98 __u32 mxcsr;
99 __u32 mxcsr_mask;
100 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
101 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
102 __u32 reserved2[24];
103};
104
105struct sigcontext {
106 unsigned long r8;
107 unsigned long r9;
108 unsigned long r10;
109 unsigned long r11;
110 unsigned long r12;
111 unsigned long r13;
112 unsigned long r14;
113 unsigned long r15;
114 unsigned long rdi;
115 unsigned long rsi;
116 unsigned long rbp;
117 unsigned long rbx;
118 unsigned long rdx;
119 unsigned long rax;
120 unsigned long rcx;
121 unsigned long rsp;
122 unsigned long rip;
123 unsigned long eflags; /* RFLAGS */
124 unsigned short cs;
125 unsigned short gs;
126 unsigned short fs;
127 unsigned short __pad0;
128 unsigned long err;
129 unsigned long trapno;
130 unsigned long oldmask;
131 unsigned long cr2;
132 struct _fpstate __user *fpstate; /* zero when no FPU context */
133 unsigned long reserved1[8];
134};
135
136#endif /* !__i386__ */
137
13#endif
138#endif
diff --git a/include/asm-x86/sigcontext_32.h b/include/asm-x86/sigcontext_32.h
deleted file mode 100644
index aaef089a7787..000000000000
--- a/include/asm-x86/sigcontext_32.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _ASMi386_SIGCONTEXT_H
2#define _ASMi386_SIGCONTEXT_H
3
4#include <linux/compiler.h>
5
6/*
7 * As documented in the iBCS2 standard..
8 *
9 * The first part of "struct _fpstate" is just the normal i387
10 * hardware setup, the extra "status" word is used to save the
11 * coprocessor status word before entering the handler.
12 *
13 * Pentium III FXSR, SSE support
14 * Gareth Hughes <gareth@valinux.com>, May 2000
15 *
16 * The FPU state data structure has had to grow to accommodate the
17 * extended FPU state required by the Streaming SIMD Extensions.
18 * There is no documented standard to accomplish this at the moment.
19 */
20struct _fpreg {
21 unsigned short significand[4];
22 unsigned short exponent;
23};
24
25struct _fpxreg {
26 unsigned short significand[4];
27 unsigned short exponent;
28 unsigned short padding[3];
29};
30
31struct _xmmreg {
32 unsigned long element[4];
33};
34
35struct _fpstate {
36 /* Regular FPU environment */
37 unsigned long cw;
38 unsigned long sw;
39 unsigned long tag;
40 unsigned long ipoff;
41 unsigned long cssel;
42 unsigned long dataoff;
43 unsigned long datasel;
44 struct _fpreg _st[8];
45 unsigned short status;
46 unsigned short magic; /* 0xffff = regular FPU data only */
47
48 /* FXSR FPU environment */
49 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
50 unsigned long mxcsr;
51 unsigned long reserved;
52 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
53 struct _xmmreg _xmm[8];
54 unsigned long padding[56];
55};
56
57#define X86_FXSR_MAGIC 0x0000
58
59struct sigcontext {
60 unsigned short gs, __gsh;
61 unsigned short fs, __fsh;
62 unsigned short es, __esh;
63 unsigned short ds, __dsh;
64 unsigned long edi;
65 unsigned long esi;
66 unsigned long ebp;
67 unsigned long esp;
68 unsigned long ebx;
69 unsigned long edx;
70 unsigned long ecx;
71 unsigned long eax;
72 unsigned long trapno;
73 unsigned long err;
74 unsigned long eip;
75 unsigned short cs, __csh;
76 unsigned long eflags;
77 unsigned long esp_at_signal;
78 unsigned short ss, __ssh;
79 struct _fpstate __user * fpstate;
80 unsigned long oldmask;
81 unsigned long cr2;
82};
83
84
85#endif
diff --git a/include/asm-x86/sigcontext_64.h b/include/asm-x86/sigcontext_64.h
deleted file mode 100644
index b4e40236666c..000000000000
--- a/include/asm-x86/sigcontext_64.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef _ASM_X86_64_SIGCONTEXT_H
2#define _ASM_X86_64_SIGCONTEXT_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7/* FXSAVE frame */
8/* Note: reserved1/2 may someday contain valuable data. Always save/restore
9 them when you change signal frames. */
10struct _fpstate {
11 __u16 cwd;
12 __u16 swd;
13 __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
14 __u16 fop;
15 __u64 rip;
16 __u64 rdp;
17 __u32 mxcsr;
18 __u32 mxcsr_mask;
19 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
20 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
21 __u32 reserved2[24];
22};
23
24struct sigcontext {
25 unsigned long r8;
26 unsigned long r9;
27 unsigned long r10;
28 unsigned long r11;
29 unsigned long r12;
30 unsigned long r13;
31 unsigned long r14;
32 unsigned long r15;
33 unsigned long rdi;
34 unsigned long rsi;
35 unsigned long rbp;
36 unsigned long rbx;
37 unsigned long rdx;
38 unsigned long rax;
39 unsigned long rcx;
40 unsigned long rsp;
41 unsigned long rip;
42 unsigned long eflags; /* RFLAGS */
43 unsigned short cs;
44 unsigned short gs;
45 unsigned short fs;
46 unsigned short __pad0;
47 unsigned long err;
48 unsigned long trapno;
49 unsigned long oldmask;
50 unsigned long cr2;
51 struct _fpstate __user *fpstate; /* zero when no FPU context */
52 unsigned long reserved1[8];
53};
54
55#endif
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index bf5a63f457da..987a422a2c78 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -1,13 +1,266 @@
1#ifndef _ASM_X86_SIGNAL_H
2#define _ASM_X86_SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7#include <linux/compiler.h>
8
9/* Avoid too many header ordering problems. */
10struct siginfo;
11
1#ifdef __KERNEL__ 12#ifdef __KERNEL__
2# ifdef CONFIG_X86_32 13#include <linux/linkage.h>
3# include "signal_32.h" 14
4# else 15/* Most things should be clean enough to redefine this at will, if care
5# include "signal_64.h" 16 is taken to make libc match. */
6# endif 17
18#define _NSIG 64
19
20#ifdef __i386__
21# define _NSIG_BPW 32
7#else 22#else
8# ifdef __i386__ 23# define _NSIG_BPW 64
9# include "signal_32.h" 24#endif
10# else 25
11# include "signal_64.h" 26#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
12# endif 27
28typedef unsigned long old_sigset_t; /* at least 32 bits */
29
30typedef struct {
31 unsigned long sig[_NSIG_WORDS];
32} sigset_t;
33
34#else
35/* Here we must cater to libcs that poke about in kernel headers. */
36
37#define NSIG 32
38typedef unsigned long sigset_t;
39
40#endif /* __KERNEL__ */
41#endif /* __ASSEMBLY__ */
42
43#define SIGHUP 1
44#define SIGINT 2
45#define SIGQUIT 3
46#define SIGILL 4
47#define SIGTRAP 5
48#define SIGABRT 6
49#define SIGIOT 6
50#define SIGBUS 7
51#define SIGFPE 8
52#define SIGKILL 9
53#define SIGUSR1 10
54#define SIGSEGV 11
55#define SIGUSR2 12
56#define SIGPIPE 13
57#define SIGALRM 14
58#define SIGTERM 15
59#define SIGSTKFLT 16
60#define SIGCHLD 17
61#define SIGCONT 18
62#define SIGSTOP 19
63#define SIGTSTP 20
64#define SIGTTIN 21
65#define SIGTTOU 22
66#define SIGURG 23
67#define SIGXCPU 24
68#define SIGXFSZ 25
69#define SIGVTALRM 26
70#define SIGPROF 27
71#define SIGWINCH 28
72#define SIGIO 29
73#define SIGPOLL SIGIO
74/*
75#define SIGLOST 29
76*/
77#define SIGPWR 30
78#define SIGSYS 31
79#define SIGUNUSED 31
80
81/* These should not be considered constants from userland. */
82#define SIGRTMIN 32
83#define SIGRTMAX _NSIG
84
85/*
86 * SA_FLAGS values:
87 *
88 * SA_ONSTACK indicates that a registered stack_t will be used.
89 * SA_RESTART flag to get restarting signals (which were the default long ago)
90 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
91 * SA_RESETHAND clears the handler when the signal is delivered.
92 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
93 * SA_NODEFER prevents the current signal from being masked in the handler.
94 *
95 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
96 * Unix names RESETHAND and NODEFER respectively.
97 */
98#define SA_NOCLDSTOP 0x00000001u
99#define SA_NOCLDWAIT 0x00000002u
100#define SA_SIGINFO 0x00000004u
101#define SA_ONSTACK 0x08000000u
102#define SA_RESTART 0x10000000u
103#define SA_NODEFER 0x40000000u
104#define SA_RESETHAND 0x80000000u
105
106#define SA_NOMASK SA_NODEFER
107#define SA_ONESHOT SA_RESETHAND
108
109#define SA_RESTORER 0x04000000
110
111/*
112 * sigaltstack controls
113 */
114#define SS_ONSTACK 1
115#define SS_DISABLE 2
116
117#define MINSIGSTKSZ 2048
118#define SIGSTKSZ 8192
119
120#include <asm-generic/signal.h>
121
122#ifndef __ASSEMBLY__
123
124#ifdef __i386__
125# ifdef __KERNEL__
126struct old_sigaction {
127 __sighandler_t sa_handler;
128 old_sigset_t sa_mask;
129 unsigned long sa_flags;
130 __sigrestore_t sa_restorer;
131};
132
133struct sigaction {
134 __sighandler_t sa_handler;
135 unsigned long sa_flags;
136 __sigrestore_t sa_restorer;
137 sigset_t sa_mask; /* mask last for extensibility */
138};
139
140struct k_sigaction {
141 struct sigaction sa;
142};
143# else /* __KERNEL__ */
144/* Here we must cater to libcs that poke about in kernel headers. */
145
146struct sigaction {
147 union {
148 __sighandler_t _sa_handler;
149 void (*_sa_sigaction)(int, struct siginfo *, void *);
150 } _u;
151 sigset_t sa_mask;
152 unsigned long sa_flags;
153 void (*sa_restorer)(void);
154};
155
156#define sa_handler _u._sa_handler
157#define sa_sigaction _u._sa_sigaction
158
159# endif /* ! __KERNEL__ */
160#else /* __i386__ */
161
162struct sigaction {
163 __sighandler_t sa_handler;
164 unsigned long sa_flags;
165 __sigrestore_t sa_restorer;
166 sigset_t sa_mask; /* mask last for extensibility */
167};
168
169struct k_sigaction {
170 struct sigaction sa;
171};
172
173#endif /* !__i386__ */
174
175typedef struct sigaltstack {
176 void __user *ss_sp;
177 int ss_flags;
178 size_t ss_size;
179} stack_t;
180
181#ifdef __KERNEL__
182#include <asm/sigcontext.h>
183
 184#ifdef __i386__
185
186#define __HAVE_ARCH_SIG_BITOPS
187
188#define sigaddset(set,sig) \
 189	(__builtin_constant_p(sig) ? \
190 __const_sigaddset((set),(sig)) : \
191 __gen_sigaddset((set),(sig)))
192
193static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
194{
195 __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
196}
197
198static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
199{
200 unsigned long sig = _sig - 1;
201 set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
202}
203
204#define sigdelset(set,sig) \
205 (__builtin_constant_p(sig) ? \
206 __const_sigdelset((set),(sig)) : \
207 __gen_sigdelset((set),(sig)))
208
209
210static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
211{
212 __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
213}
214
215static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
216{
217 unsigned long sig = _sig - 1;
218 set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
219}
220
221static __inline__ int __const_sigismember(sigset_t *set, int _sig)
222{
223 unsigned long sig = _sig - 1;
224 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
225}
226
227static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
228{
229 int ret;
230 __asm__("btl %2,%1\n\tsbbl %0,%0"
231 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
232 return ret;
233}
234
235#define sigismember(set,sig) \
236 (__builtin_constant_p(sig) ? \
237 __const_sigismember((set),(sig)) : \
238 __gen_sigismember((set),(sig)))
239
240static __inline__ int sigfindinword(unsigned long word)
241{
242 __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
243 return word;
244}
245
246struct pt_regs;
247
248#define ptrace_signal_deliver(regs, cookie) \
249 do { \
250 if (current->ptrace & PT_DTRACE) { \
251 current->ptrace &= ~PT_DTRACE; \
252 (regs)->eflags &= ~TF_MASK; \
253 } \
254 } while (0)
255
256#else /* __i386__ */
257
258#undef __HAVE_ARCH_SIG_BITOPS
259
260#define ptrace_signal_deliver(regs, cookie) do { } while (0)
261
262#endif /* !__i386__ */
263#endif /* __KERNEL__ */
264#endif /* __ASSEMBLY__ */
265
13#endif 266#endif
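
The constants and sigaction definitions above are the kernel's view of the signal ABI. The sketch below, assuming glibc wrappers (its own sigaddset/sigprocmask, not the kernel-internal bitops defined in this header), shows the typical user-space use of SA_RESTART, SA_NOCLDSTOP and a sigset_t mask:

#include <signal.h>
#include <stdio.h>
#include <string.h>

/* Placeholder handler; real code would waitpid() in the main loop. */
static void on_chld(int sig)
{
	(void)sig;
}

int main(void)
{
	struct sigaction sa;
	sigset_t block, old;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_chld;
	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;	/* restart syscalls, no stop notifications */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGTERM);
	sigprocmask(SIG_BLOCK, &block, &old);		/* defer SIGTERM... */
	/* ...around a critical section... */
	sigprocmask(SIG_SETMASK, &old, NULL);

	puts("signal setup done");
	return 0;
}
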
diff --git a/include/asm-x86/signal_32.h b/include/asm-x86/signal_32.h
deleted file mode 100644
index c3e8adec5918..000000000000
--- a/include/asm-x86/signal_32.h
+++ /dev/null
@@ -1,232 +0,0 @@
1#ifndef _ASMi386_SIGNAL_H
2#define _ASMi386_SIGNAL_H
3
4#include <linux/types.h>
5#include <linux/time.h>
6#include <linux/compiler.h>
7
8/* Avoid too many header ordering problems. */
9struct siginfo;
10
11#ifdef __KERNEL__
12
13#include <linux/linkage.h>
14
15/* Most things should be clean enough to redefine this at will, if care
16 is taken to make libc match. */
17
18#define _NSIG 64
19#define _NSIG_BPW 32
20#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
21
22typedef unsigned long old_sigset_t; /* at least 32 bits */
23
24typedef struct {
25 unsigned long sig[_NSIG_WORDS];
26} sigset_t;
27
28#else
29/* Here we must cater to libcs that poke about in kernel headers. */
30
31#define NSIG 32
32typedef unsigned long sigset_t;
33
34#endif /* __KERNEL__ */
35
36#define SIGHUP 1
37#define SIGINT 2
38#define SIGQUIT 3
39#define SIGILL 4
40#define SIGTRAP 5
41#define SIGABRT 6
42#define SIGIOT 6
43#define SIGBUS 7
44#define SIGFPE 8
45#define SIGKILL 9
46#define SIGUSR1 10
47#define SIGSEGV 11
48#define SIGUSR2 12
49#define SIGPIPE 13
50#define SIGALRM 14
51#define SIGTERM 15
52#define SIGSTKFLT 16
53#define SIGCHLD 17
54#define SIGCONT 18
55#define SIGSTOP 19
56#define SIGTSTP 20
57#define SIGTTIN 21
58#define SIGTTOU 22
59#define SIGURG 23
60#define SIGXCPU 24
61#define SIGXFSZ 25
62#define SIGVTALRM 26
63#define SIGPROF 27
64#define SIGWINCH 28
65#define SIGIO 29
66#define SIGPOLL SIGIO
67/*
68#define SIGLOST 29
69*/
70#define SIGPWR 30
71#define SIGSYS 31
72#define SIGUNUSED 31
73
74/* These should not be considered constants from userland. */
75#define SIGRTMIN 32
76#define SIGRTMAX _NSIG
77
78/*
79 * SA_FLAGS values:
80 *
81 * SA_ONSTACK indicates that a registered stack_t will be used.
82 * SA_RESTART flag to get restarting signals (which were the default long ago)
83 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
84 * SA_RESETHAND clears the handler when the signal is delivered.
85 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
86 * SA_NODEFER prevents the current signal from being masked in the handler.
87 *
88 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
89 * Unix names RESETHAND and NODEFER respectively.
90 */
91#define SA_NOCLDSTOP 0x00000001u
92#define SA_NOCLDWAIT 0x00000002u
93#define SA_SIGINFO 0x00000004u
94#define SA_ONSTACK 0x08000000u
95#define SA_RESTART 0x10000000u
96#define SA_NODEFER 0x40000000u
97#define SA_RESETHAND 0x80000000u
98
99#define SA_NOMASK SA_NODEFER
100#define SA_ONESHOT SA_RESETHAND
101
102#define SA_RESTORER 0x04000000
103
104/*
105 * sigaltstack controls
106 */
107#define SS_ONSTACK 1
108#define SS_DISABLE 2
109
110#define MINSIGSTKSZ 2048
111#define SIGSTKSZ 8192
112
113#include <asm-generic/signal.h>
114
115#ifdef __KERNEL__
116struct old_sigaction {
117 __sighandler_t sa_handler;
118 old_sigset_t sa_mask;
119 unsigned long sa_flags;
120 __sigrestore_t sa_restorer;
121};
122
123struct sigaction {
124 __sighandler_t sa_handler;
125 unsigned long sa_flags;
126 __sigrestore_t sa_restorer;
127 sigset_t sa_mask; /* mask last for extensibility */
128};
129
130struct k_sigaction {
131 struct sigaction sa;
132};
133#else
134/* Here we must cater to libcs that poke about in kernel headers. */
135
136struct sigaction {
137 union {
138 __sighandler_t _sa_handler;
139 void (*_sa_sigaction)(int, struct siginfo *, void *);
140 } _u;
141 sigset_t sa_mask;
142 unsigned long sa_flags;
143 void (*sa_restorer)(void);
144};
145
146#define sa_handler _u._sa_handler
147#define sa_sigaction _u._sa_sigaction
148
149#endif /* __KERNEL__ */
150
151typedef struct sigaltstack {
152 void __user *ss_sp;
153 int ss_flags;
154 size_t ss_size;
155} stack_t;
156
157#ifdef __KERNEL__
158#include <asm/sigcontext.h>
159
160#define __HAVE_ARCH_SIG_BITOPS
161
162#define sigaddset(set,sig) \
163 (__builtin_constant_p(sig) ? \
164 __const_sigaddset((set),(sig)) : \
165 __gen_sigaddset((set),(sig)))
166
167static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
168{
169 __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
170}
171
172static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
173{
174 unsigned long sig = _sig - 1;
175 set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
176}
177
178#define sigdelset(set,sig) \
179 (__builtin_constant_p(sig) ? \
180 __const_sigdelset((set),(sig)) : \
181 __gen_sigdelset((set),(sig)))
182
183
184static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
185{
186 __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
187}
188
189static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
190{
191 unsigned long sig = _sig - 1;
192 set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
193}
194
195static __inline__ int __const_sigismember(sigset_t *set, int _sig)
196{
197 unsigned long sig = _sig - 1;
198 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
199}
200
201static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
202{
203 int ret;
204 __asm__("btl %2,%1\n\tsbbl %0,%0"
205 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
206 return ret;
207}
208
209#define sigismember(set,sig) \
210 (__builtin_constant_p(sig) ? \
211 __const_sigismember((set),(sig)) : \
212 __gen_sigismember((set),(sig)))
213
214static __inline__ int sigfindinword(unsigned long word)
215{
216 __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
217 return word;
218}
219
220struct pt_regs;
221
222#define ptrace_signal_deliver(regs, cookie) \
223 do { \
224 if (current->ptrace & PT_DTRACE) { \
225 current->ptrace &= ~PT_DTRACE; \
226 (regs)->eflags &= ~TF_MASK; \
227 } \
228 } while (0)
229
230#endif /* __KERNEL__ */
231
232#endif
diff --git a/include/asm-x86/signal_64.h b/include/asm-x86/signal_64.h
deleted file mode 100644
index 4581f978b299..000000000000
--- a/include/asm-x86/signal_64.h
+++ /dev/null
@@ -1,181 +0,0 @@
1#ifndef _ASMx8664_SIGNAL_H
2#define _ASMx8664_SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7
8/* Avoid too many header ordering problems. */
9struct siginfo;
10
11#ifdef __KERNEL__
12#include <linux/linkage.h>
13/* Most things should be clean enough to redefine this at will, if care
14 is taken to make libc match. */
15
16#define _NSIG 64
17#define _NSIG_BPW 64
18#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
19
20typedef unsigned long old_sigset_t; /* at least 32 bits */
21
22typedef struct {
23 unsigned long sig[_NSIG_WORDS];
24} sigset_t;
25
26
27#else
28/* Here we must cater to libcs that poke about in kernel headers. */
29
30#define NSIG 32
31typedef unsigned long sigset_t;
32
33#endif /* __KERNEL__ */
34#endif
35
36#define SIGHUP 1
37#define SIGINT 2
38#define SIGQUIT 3
39#define SIGILL 4
40#define SIGTRAP 5
41#define SIGABRT 6
42#define SIGIOT 6
43#define SIGBUS 7
44#define SIGFPE 8
45#define SIGKILL 9
46#define SIGUSR1 10
47#define SIGSEGV 11
48#define SIGUSR2 12
49#define SIGPIPE 13
50#define SIGALRM 14
51#define SIGTERM 15
52#define SIGSTKFLT 16
53#define SIGCHLD 17
54#define SIGCONT 18
55#define SIGSTOP 19
56#define SIGTSTP 20
57#define SIGTTIN 21
58#define SIGTTOU 22
59#define SIGURG 23
60#define SIGXCPU 24
61#define SIGXFSZ 25
62#define SIGVTALRM 26
63#define SIGPROF 27
64#define SIGWINCH 28
65#define SIGIO 29
66#define SIGPOLL SIGIO
67/*
68#define SIGLOST 29
69*/
70#define SIGPWR 30
71#define SIGSYS 31
72#define SIGUNUSED 31
73
74/* These should not be considered constants from userland. */
75#define SIGRTMIN 32
76#define SIGRTMAX _NSIG
77
78/*
79 * SA_FLAGS values:
80 *
81 * SA_ONSTACK indicates that a registered stack_t will be used.
82 * SA_RESTART flag to get restarting signals (which were the default long ago)
83 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
84 * SA_RESETHAND clears the handler when the signal is delivered.
85 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
86 * SA_NODEFER prevents the current signal from being masked in the handler.
87 *
88 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
89 * Unix names RESETHAND and NODEFER respectively.
90 */
91#define SA_NOCLDSTOP 0x00000001
92#define SA_NOCLDWAIT 0x00000002
93#define SA_SIGINFO 0x00000004
94#define SA_ONSTACK 0x08000000
95#define SA_RESTART 0x10000000
96#define SA_NODEFER 0x40000000
97#define SA_RESETHAND 0x80000000
98
99#define SA_NOMASK SA_NODEFER
100#define SA_ONESHOT SA_RESETHAND
101
102#define SA_RESTORER 0x04000000
103
104/*
105 * sigaltstack controls
106 */
107#define SS_ONSTACK 1
108#define SS_DISABLE 2
109
110#define MINSIGSTKSZ 2048
111#define SIGSTKSZ 8192
112
113#include <asm-generic/signal.h>
114
115#ifndef __ASSEMBLY__
116
117struct sigaction {
118 __sighandler_t sa_handler;
119 unsigned long sa_flags;
120 __sigrestore_t sa_restorer;
121 sigset_t sa_mask; /* mask last for extensibility */
122};
123
124struct k_sigaction {
125 struct sigaction sa;
126};
127
128typedef struct sigaltstack {
129 void __user *ss_sp;
130 int ss_flags;
131 size_t ss_size;
132} stack_t;
133
134#ifdef __KERNEL__
135#include <asm/sigcontext.h>
136
137#undef __HAVE_ARCH_SIG_BITOPS
138#if 0
139
140static inline void sigaddset(sigset_t *set, int _sig)
141{
142 __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
143}
144
145static inline void sigdelset(sigset_t *set, int _sig)
146{
147 __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
148}
149
150static inline int __const_sigismember(sigset_t *set, int _sig)
151{
152 unsigned long sig = _sig - 1;
153 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
154}
155
156static inline int __gen_sigismember(sigset_t *set, int _sig)
157{
158 int ret;
159 __asm__("btq %2,%1\n\tsbbq %0,%0"
160 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
161 return ret;
162}
163
164#define sigismember(set,sig) \
165 (__builtin_constant_p(sig) ? \
166 __const_sigismember((set),(sig)) : \
167 __gen_sigismember((set),(sig)))
168
169static inline int sigfindinword(unsigned long word)
170{
171 __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
172 return word;
173}
174#endif
175#endif
176
177#define ptrace_signal_deliver(regs, cookie) do { } while (0)
178
179#endif /* __KERNEL__ */
180
181#endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 6f0e0273b646..ab612b0ff270 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -76,6 +76,8 @@ extern unsigned __cpuinitdata disabled_cpus;
76 76
77#endif /* CONFIG_SMP */ 77#endif /* CONFIG_SMP */
78 78
79#define safe_smp_processor_id() smp_processor_id()
80
79static inline int hard_smp_processor_id(void) 81static inline int hard_smp_processor_id(void)
80{ 82{
81 /* we don't want to mark this access volatile - bad code generation */ 83 /* we don't want to mark this access volatile - bad code generation */
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h
index 3ff6b50ef833..5c22dcb5d17e 100644
--- a/include/asm-x86/stat.h
+++ b/include/asm-x86/stat.h
@@ -1,13 +1,114 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_STAT_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_STAT_H
3# include "stat_32.h" 3
4# else 4#define STAT_HAVE_NSEC 1
5# include "stat_64.h" 5
6# endif 6#ifdef __i386__
7struct stat {
8 unsigned long st_dev;
9 unsigned long st_ino;
10 unsigned short st_mode;
11 unsigned short st_nlink;
12 unsigned short st_uid;
13 unsigned short st_gid;
14 unsigned long st_rdev;
15 unsigned long st_size;
16 unsigned long st_blksize;
17 unsigned long st_blocks;
18 unsigned long st_atime;
19 unsigned long st_atime_nsec;
20 unsigned long st_mtime;
21 unsigned long st_mtime_nsec;
22 unsigned long st_ctime;
23 unsigned long st_ctime_nsec;
24 unsigned long __unused4;
25 unsigned long __unused5;
26};
27
28#define STAT64_HAS_BROKEN_ST_INO 1
29
30/* This matches struct stat64 in glibc2.1, hence the absolutely
31 * insane amounts of padding around dev_t's.
32 */
33struct stat64 {
34 unsigned long long st_dev;
35 unsigned char __pad0[4];
36
37 unsigned long __st_ino;
38
39 unsigned int st_mode;
40 unsigned int st_nlink;
41
42 unsigned long st_uid;
43 unsigned long st_gid;
44
45 unsigned long long st_rdev;
46 unsigned char __pad3[4];
47
48 long long st_size;
49 unsigned long st_blksize;
50
51 /* Number 512-byte blocks allocated. */
52 unsigned long long st_blocks;
53
54 unsigned long st_atime;
55 unsigned long st_atime_nsec;
56
57 unsigned long st_mtime;
58 unsigned int st_mtime_nsec;
59
60 unsigned long st_ctime;
61 unsigned long st_ctime_nsec;
62
63 unsigned long long st_ino;
64};
65
66#else /* __i386__ */
67
68struct stat {
69 unsigned long st_dev;
70 unsigned long st_ino;
71 unsigned long st_nlink;
72
73 unsigned int st_mode;
74 unsigned int st_uid;
75 unsigned int st_gid;
76 unsigned int __pad0;
77 unsigned long st_rdev;
78 long st_size;
79 long st_blksize;
80 long st_blocks; /* Number 512-byte blocks allocated. */
81
82 unsigned long st_atime;
83 unsigned long st_atime_nsec;
84 unsigned long st_mtime;
85 unsigned long st_mtime_nsec;
86 unsigned long st_ctime;
87 unsigned long st_ctime_nsec;
88 long __unused[3];
89};
90#endif
91
92/* for 32bit emulation and 32 bit kernels */
93struct __old_kernel_stat {
94 unsigned short st_dev;
95 unsigned short st_ino;
96 unsigned short st_mode;
97 unsigned short st_nlink;
98 unsigned short st_uid;
99 unsigned short st_gid;
100 unsigned short st_rdev;
101#ifdef __i386__
102 unsigned long st_size;
103 unsigned long st_atime;
104 unsigned long st_mtime;
105 unsigned long st_ctime;
7#else 106#else
8# ifdef __i386__ 107 unsigned int st_size;
9# include "stat_32.h" 108 unsigned int st_atime;
10# else 109 unsigned int st_mtime;
11# include "stat_64.h" 110 unsigned int st_ctime;
12# endif 111#endif
112};
113
13#endif 114#endif
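
The unified struct stat/stat64 above is the ABI behind the stat(2) family. A minimal sketch assuming the glibc wrapper (on 32-bit, building with -D_FILE_OFFSET_BITS=64 makes glibc use the stat64 layout transparently):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	/* Field names match the kernel structures above. */
	printf("mode=%o size=%lld blocks=%lld mtime=%lld\n",
	       st.st_mode & 07777,
	       (long long)st.st_size,
	       (long long)st.st_blocks,
	       (long long)st.st_mtime);
	return 0;
}
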
diff --git a/include/asm-x86/stat_32.h b/include/asm-x86/stat_32.h
deleted file mode 100644
index 67eae78323ba..000000000000
--- a/include/asm-x86/stat_32.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _I386_STAT_H
2#define _I386_STAT_H
3
4struct __old_kernel_stat {
5 unsigned short st_dev;
6 unsigned short st_ino;
7 unsigned short st_mode;
8 unsigned short st_nlink;
9 unsigned short st_uid;
10 unsigned short st_gid;
11 unsigned short st_rdev;
12 unsigned long st_size;
13 unsigned long st_atime;
14 unsigned long st_mtime;
15 unsigned long st_ctime;
16};
17
18struct stat {
19 unsigned long st_dev;
20 unsigned long st_ino;
21 unsigned short st_mode;
22 unsigned short st_nlink;
23 unsigned short st_uid;
24 unsigned short st_gid;
25 unsigned long st_rdev;
26 unsigned long st_size;
27 unsigned long st_blksize;
28 unsigned long st_blocks;
29 unsigned long st_atime;
30 unsigned long st_atime_nsec;
31 unsigned long st_mtime;
32 unsigned long st_mtime_nsec;
33 unsigned long st_ctime;
34 unsigned long st_ctime_nsec;
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39/* This matches struct stat64 in glibc2.1, hence the absolutely
40 * insane amounts of padding around dev_t's.
41 */
42struct stat64 {
43 unsigned long long st_dev;
44 unsigned char __pad0[4];
45
46#define STAT64_HAS_BROKEN_ST_INO 1
47 unsigned long __st_ino;
48
49 unsigned int st_mode;
50 unsigned int st_nlink;
51
52 unsigned long st_uid;
53 unsigned long st_gid;
54
55 unsigned long long st_rdev;
56 unsigned char __pad3[4];
57
58 long long st_size;
59 unsigned long st_blksize;
60
61 unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
62
63 unsigned long st_atime;
64 unsigned long st_atime_nsec;
65
66 unsigned long st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned long st_ctime;
70 unsigned long st_ctime_nsec;
71
72 unsigned long long st_ino;
73};
74
75#define STAT_HAVE_NSEC 1
76
77#endif
diff --git a/include/asm-x86/stat_64.h b/include/asm-x86/stat_64.h
deleted file mode 100644
index fd9f00d560f8..000000000000
--- a/include/asm-x86/stat_64.h
+++ /dev/null
@@ -1,44 +0,0 @@
1#ifndef _ASM_X86_64_STAT_H
2#define _ASM_X86_64_STAT_H
3
4#define STAT_HAVE_NSEC 1
5
6struct stat {
7 unsigned long st_dev;
8 unsigned long st_ino;
9 unsigned long st_nlink;
10
11 unsigned int st_mode;
12 unsigned int st_uid;
13 unsigned int st_gid;
14 unsigned int __pad0;
15 unsigned long st_rdev;
16 long st_size;
17 long st_blksize;
18 long st_blocks; /* Number 512-byte blocks allocated. */
19
20 unsigned long st_atime;
21 unsigned long st_atime_nsec;
22 unsigned long st_mtime;
23 unsigned long st_mtime_nsec;
24 unsigned long st_ctime;
25 unsigned long st_ctime_nsec;
26 long __unused[3];
27};
28
29/* For 32bit emulation */
30struct __old_kernel_stat {
31 unsigned short st_dev;
32 unsigned short st_ino;
33 unsigned short st_mode;
34 unsigned short st_nlink;
35 unsigned short st_uid;
36 unsigned short st_gid;
37 unsigned short st_rdev;
38 unsigned int st_size;
39 unsigned int st_atime;
40 unsigned int st_mtime;
41 unsigned int st_ctime;
42};
43
44#endif
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h
index 327fb5d7a148..7c651aa97252 100644
--- a/include/asm-x86/statfs.h
+++ b/include/asm-x86/statfs.h
@@ -1,13 +1,63 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_X86_STATFS_H
2# ifdef CONFIG_X86_32 2#define _ASM_X86_STATFS_H
3# include "statfs_32.h" 3
4# else 4#ifdef __i386__
5# include "statfs_64.h" 5#include <asm-generic/statfs.h>
6# endif
7#else 6#else
8# ifdef __i386__ 7
9# include "statfs_32.h" 8#ifndef __KERNEL_STRICT_NAMES
10# else 9
11# include "statfs_64.h" 10#include <linux/types.h>
12# endif 11
12typedef __kernel_fsid_t fsid_t;
13
14#endif
15
16/*
17 * This is ugly -- we're already 64-bit clean, so just duplicate the
18 * definitions.
19 */
20struct statfs {
21 long f_type;
22 long f_bsize;
23 long f_blocks;
24 long f_bfree;
25 long f_bavail;
26 long f_files;
27 long f_ffree;
28 __kernel_fsid_t f_fsid;
29 long f_namelen;
30 long f_frsize;
31 long f_spare[5];
32};
33
34struct statfs64 {
35 long f_type;
36 long f_bsize;
37 long f_blocks;
38 long f_bfree;
39 long f_bavail;
40 long f_files;
41 long f_ffree;
42 __kernel_fsid_t f_fsid;
43 long f_namelen;
44 long f_frsize;
45 long f_spare[5];
46};
47
48struct compat_statfs64 {
49 __u32 f_type;
50 __u32 f_bsize;
51 __u64 f_blocks;
52 __u64 f_bfree;
53 __u64 f_bavail;
54 __u64 f_files;
55 __u64 f_ffree;
56 __kernel_fsid_t f_fsid;
57 __u32 f_namelen;
58 __u32 f_frsize;
59 __u32 f_spare[5];
60} __attribute__((packed));
61
62#endif /* !__i386__ */
13#endif 63#endif
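
The duplicated 64-bit statfs/statfs64 definitions above back the statfs(2) call. A short sketch assuming glibc's <sys/vfs.h> wrapper:

#include <stdio.h>
#include <sys/vfs.h>

int main(int argc, char **argv)
{
	struct statfs fs;
	const char *path = argc > 1 ? argv[1] : "/";

	if (statfs(path, &fs) != 0) {
		perror("statfs");
		return 1;
	}
	/* f_type identifies the filesystem; f_bavail * f_bsize is the space
	   available to unprivileged users. */
	printf("%s: type=%#lx bsize=%ld avail=%lld MiB\n",
	       path, (unsigned long)fs.f_type, (long)fs.f_bsize,
	       (long long)fs.f_bavail * fs.f_bsize / (1024 * 1024));
	return 0;
}
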
diff --git a/include/asm-x86/statfs_32.h b/include/asm-x86/statfs_32.h
deleted file mode 100644
index 24972c175132..000000000000
--- a/include/asm-x86/statfs_32.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _I386_STATFS_H
2#define _I386_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-x86/statfs_64.h b/include/asm-x86/statfs_64.h
deleted file mode 100644
index b3f4718af30b..000000000000
--- a/include/asm-x86/statfs_64.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _X86_64_STATFS_H
2#define _X86_64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12/*
13 * This is ugly -- we're already 64-bit clean, so just duplicate the
14 * definitions.
15 */
16struct statfs {
17 long f_type;
18 long f_bsize;
19 long f_blocks;
20 long f_bfree;
21 long f_bavail;
22 long f_files;
23 long f_ffree;
24 __kernel_fsid_t f_fsid;
25 long f_namelen;
26 long f_frsize;
27 long f_spare[5];
28};
29
30struct statfs64 {
31 long f_type;
32 long f_bsize;
33 long f_blocks;
34 long f_bfree;
35 long f_bavail;
36 long f_files;
37 long f_ffree;
38 __kernel_fsid_t f_fsid;
39 long f_namelen;
40 long f_frsize;
41 long f_spare[5];
42};
43
44struct compat_statfs64 {
45 __u32 f_type;
46 __u32 f_bsize;
47 __u64 f_blocks;
48 __u64 f_bfree;
49 __u64 f_bavail;
50 __u64 f_files;
51 __u64 f_ffree;
52 __kernel_fsid_t f_fsid;
53 __u32 f_namelen;
54 __u32 f_frsize;
55 __u32 f_spare[5];
56} __attribute__((packed));
57
58#endif
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index 9440a7a1b99a..c505a76bcf6e 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -3,6 +3,9 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_64_SUSPEND_H
7#define __ASM_X86_64_SUSPEND_H
8
6#include <asm/desc.h> 9#include <asm/desc.h>
7#include <asm/i387.h> 10#include <asm/i387.h>
8 11
@@ -12,8 +15,9 @@ arch_prepare_suspend(void)
12 return 0; 15 return 0;
13} 16}
14 17
15/* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */ 18/* Image of the saved processor state. If you touch this, fix acpi/wakeup.S. */
16struct saved_context { 19struct saved_context {
20 struct pt_regs regs;
17 u16 ds, es, fs, gs, ss; 21 u16 ds, es, fs, gs, ss;
18 unsigned long gs_base, gs_kernel_base, fs_base; 22 unsigned long gs_base, gs_kernel_base, fs_base;
19 unsigned long cr0, cr2, cr3, cr4, cr8; 23 unsigned long cr0, cr2, cr3, cr4, cr8;
@@ -29,29 +33,16 @@ struct saved_context {
29 unsigned long tr; 33 unsigned long tr;
30 unsigned long safety; 34 unsigned long safety;
31 unsigned long return_address; 35 unsigned long return_address;
32 unsigned long eflags;
33} __attribute__((packed)); 36} __attribute__((packed));
34 37
35/* We'll access these from assembly, so we'd better have them outside struct */
36extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
37extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
38extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
39extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
40extern unsigned long saved_context_eflags;
41
42#define loaddebug(thread,register) \ 38#define loaddebug(thread,register) \
43 set_debugreg((thread)->debugreg##register, register) 39 set_debugreg((thread)->debugreg##register, register)
44 40
45extern void fix_processor_context(void); 41extern void fix_processor_context(void);
46 42
47extern unsigned long saved_rip;
48extern unsigned long saved_rsp;
49extern unsigned long saved_rbp;
50extern unsigned long saved_rbx;
51extern unsigned long saved_rsi;
52extern unsigned long saved_rdi;
53
54/* routines for saving/restoring kernel state */ 43/* routines for saving/restoring kernel state */
55extern int acpi_save_state_mem(void); 44extern int acpi_save_state_mem(void);
56extern char core_restore_code; 45extern char core_restore_code;
57extern char restore_registers; 46extern char restore_registers;
47
48#endif /* __ASM_X86_64_SUSPEND_H */
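
The saved_context change above replaces the scattered saved_context_* and saved_r* globals with a pt_regs embedded in the struct, so the assembly and C sides agree on a single layout. A rough user-space analogue (hypothetical names, x86-64 GCC assumed) of why one struct is easier to keep in sync than separate symbols:

#include <stddef.h>
#include <stdio.h>

/* One packed struct: member offsets, not symbol names, tie asm to C. */
struct ctx {
	unsigned long rbx;
	unsigned long rbp;
	unsigned long rsp;
} __attribute__((packed));

static struct ctx saved;

static void save_ctx(void)
{
	__asm__ __volatile__("movq %%rbx, %0\n\t"
			     "movq %%rbp, %1\n\t"
			     "movq %%rsp, %2"
			     : "=m"(saved.rbx), "=m"(saved.rbp), "=m"(saved.rsp));
}

int main(void)
{
	save_ctx();
	printf("rsp=%#lx (offset of rsp in struct: %zu)\n",
	       saved.rsp, offsetof(struct ctx, rsp));
	return 0;
}
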