aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
commit8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
treea8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/x86/include
parent406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/x86/include')
-rw-r--r--arch/x86/include/asm/Kbuild30
-rw-r--r--arch/x86/include/asm/acpi.h9
-rw-r--r--arch/x86/include/asm/alternative-asm.h13
-rw-r--r--arch/x86/include/asm/alternative.h92
-rw-r--r--arch/x86/include/asm/amd_nb.h27
-rw-r--r--arch/x86/include/asm/apic.h105
-rw-r--r--arch/x86/include/asm/apic_flat_64.h7
-rw-r--r--arch/x86/include/asm/apicdef.h5
-rw-r--r--arch/x86/include/asm/archrandom.h75
-rw-r--r--arch/x86/include/asm/asm.h38
-rw-r--r--arch/x86/include/asm/atomic.h50
-rw-r--r--arch/x86/include/asm/atomic64_32.h158
-rw-r--r--arch/x86/include/asm/atomic64_64.h6
-rw-r--r--arch/x86/include/asm/barrier.h116
-rw-r--r--arch/x86/include/asm/bitops.h87
-rw-r--r--arch/x86/include/asm/boot.h11
-rw-r--r--arch/x86/include/asm/bug.h4
-rw-r--r--arch/x86/include/asm/cacheflush.h1
-rw-r--r--arch/x86/include/asm/calling.h50
-rw-r--r--arch/x86/include/asm/checksum.h4
-rw-r--r--arch/x86/include/asm/clocksource.h1
-rw-r--r--arch/x86/include/asm/cmpxchg.h232
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h215
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h174
-rw-r--r--arch/x86/include/asm/compat.h117
-rw-r--r--arch/x86/include/asm/context_tracking.h31
-rw-r--r--arch/x86/include/asm/cpu.h4
-rw-r--r--arch/x86/include/asm/cpu_device_id.h13
-rw-r--r--arch/x86/include/asm/cpufeature.h31
-rw-r--r--arch/x86/include/asm/crypto/ablk_helper.h31
-rw-r--r--arch/x86/include/asm/crypto/aes.h11
-rw-r--r--arch/x86/include/asm/crypto/camellia.h82
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h121
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h19
-rw-r--r--arch/x86/include/asm/crypto/serpent-sse2.h63
-rw-r--r--arch/x86/include/asm/crypto/twofish.h46
-rw-r--r--arch/x86/include/asm/current.h2
-rw-r--r--arch/x86/include/asm/debugreg.h158
-rw-r--r--arch/x86/include/asm/desc.h13
-rw-r--r--arch/x86/include/asm/device.h7
-rw-r--r--arch/x86/include/asm/div64.h22
-rw-r--r--arch/x86/include/asm/dma-contiguous.h13
-rw-r--r--arch/x86/include/asm/dma-mapping.h43
-rw-r--r--arch/x86/include/asm/dwarf2.h2
-rw-r--r--arch/x86/include/asm/e820.h76
-rw-r--r--arch/x86/include/asm/efi.h12
-rw-r--r--arch/x86/include/asm/elf.h59
-rw-r--r--arch/x86/include/asm/emergency-restart.h2
-rw-r--r--arch/x86/include/asm/entry_arch.h9
-rw-r--r--arch/x86/include/asm/exec.h1
-rw-r--r--arch/x86/include/asm/fixmap.h7
-rw-r--r--arch/x86/include/asm/floppy.h2
-rw-r--r--arch/x86/include/asm/fpu-internal.h619
-rw-r--r--arch/x86/include/asm/ftrace.h59
-rw-r--r--arch/x86/include/asm/futex.h32
-rw-r--r--arch/x86/include/asm/gpio.h57
-rw-r--r--arch/x86/include/asm/hardirq.h15
-rw-r--r--arch/x86/include/asm/highmem.h2
-rw-r--r--arch/x86/include/asm/hpet.h2
-rw-r--r--arch/x86/include/asm/hugetlb.h4
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h5
-rw-r--r--arch/x86/include/asm/hypervisor.h1
-rw-r--r--arch/x86/include/asm/i387.h409
-rw-r--r--arch/x86/include/asm/ia32.h69
-rw-r--r--arch/x86/include/asm/ia32_unistd.h13
-rw-r--r--arch/x86/include/asm/idle.h8
-rw-r--r--arch/x86/include/asm/inat.h5
-rw-r--r--arch/x86/include/asm/init.h2
-rw-r--r--arch/x86/include/asm/insn.h25
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h34
-rw-r--r--arch/x86/include/asm/io_apic.h26
-rw-r--r--arch/x86/include/asm/iommu.h1
-rw-r--r--arch/x86/include/asm/iommu_table.h6
-rw-r--r--arch/x86/include/asm/irq_regs.h4
-rw-r--r--arch/x86/include/asm/irq_remapping.h118
-rw-r--r--arch/x86/include/asm/irq_vectors.h23
-rw-r--r--arch/x86/include/asm/ist.h17
-rw-r--r--arch/x86/include/asm/jump_label.h6
-rw-r--r--arch/x86/include/asm/kbdleds.h17
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kexec.h3
-rw-r--r--arch/x86/include/asm/kgdb.h10
-rw-r--r--arch/x86/include/asm/kprobes.h1
-rw-r--r--arch/x86/include/asm/kvm_emulate.h71
-rw-r--r--arch/x86/include/asm/kvm_guest.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h280
-rw-r--r--arch/x86/include/asm/kvm_para.h121
-rw-r--r--arch/x86/include/asm/local.h19
-rw-r--r--arch/x86/include/asm/mach_timer.h2
-rw-r--r--arch/x86/include/asm/mach_traps.h2
-rw-r--r--arch/x86/include/asm/mc146818rtc.h5
-rw-r--r--arch/x86/include/asm/mce.h148
-rw-r--r--arch/x86/include/asm/microcode.h12
-rw-r--r--arch/x86/include/asm/mmu_context.h12
-rw-r--r--arch/x86/include/asm/mmzone.h4
-rw-r--r--arch/x86/include/asm/mmzone_32.h6
-rw-r--r--arch/x86/include/asm/module.h2
-rw-r--r--arch/x86/include/asm/mpspec.h2
-rw-r--r--arch/x86/include/asm/mpspec_def.h3
-rw-r--r--arch/x86/include/asm/mrst.h22
-rw-r--r--arch/x86/include/asm/msr.h57
-rw-r--r--arch/x86/include/asm/mtrr.h85
-rw-r--r--arch/x86/include/asm/mutex.h4
-rw-r--r--arch/x86/include/asm/nmi.h55
-rw-r--r--arch/x86/include/asm/nops.h4
-rw-r--r--arch/x86/include/asm/numa.h4
-rw-r--r--arch/x86/include/asm/numachip/numachip.h19
-rw-r--r--arch/x86/include/asm/numachip/numachip_csr.h167
-rw-r--r--arch/x86/include/asm/olpc.h19
-rw-r--r--arch/x86/include/asm/page_32_types.h4
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/page_types.h1
-rw-r--r--arch/x86/include/asm/paravirt.h61
-rw-r--r--arch/x86/include/asm/paravirt_types.h5
-rw-r--r--arch/x86/include/asm/parport.h4
-rw-r--r--arch/x86/include/asm/pci.h23
-rw-r--r--arch/x86/include/asm/pci_x86.h22
-rw-r--r--arch/x86/include/asm/percpu.h121
-rw-r--r--arch/x86/include/asm/perf_event.h145
-rw-r--r--arch/x86/include/asm/pgtable-2level.h4
-rw-r--r--arch/x86/include/asm/pgtable-3level.h60
-rw-r--r--arch/x86/include/asm/pgtable.h32
-rw-r--r--arch/x86/include/asm/pgtable_32.h1
-rw-r--r--arch/x86/include/asm/pgtable_64.h9
-rw-r--r--arch/x86/include/asm/pgtable_types.h30
-rw-r--r--arch/x86/include/asm/posix_types.h12
-rw-r--r--arch/x86/include/asm/processor-flags.h93
-rw-r--r--arch/x86/include/asm/processor.h171
-rw-r--r--arch/x86/include/asm/prom.h10
-rw-r--r--arch/x86/include/asm/ptrace.h107
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/pvclock.h47
-rw-r--r--arch/x86/include/asm/realmode.h63
-rw-r--r--arch/x86/include/asm/reboot.h6
-rw-r--r--arch/x86/include/asm/rwsem.h8
-rw-r--r--arch/x86/include/asm/seccomp.h4
-rw-r--r--arch/x86/include/asm/segment.h62
-rw-r--r--arch/x86/include/asm/setup.h7
-rw-r--r--arch/x86/include/asm/sigcontext.h213
-rw-r--r--arch/x86/include/asm/sigframe.h13
-rw-r--r--arch/x86/include/asm/sighandling.h22
-rw-r--r--arch/x86/include/asm/signal.h144
-rw-r--r--arch/x86/include/asm/smap.h91
-rw-r--r--arch/x86/include/asm/smp.h43
-rw-r--r--arch/x86/include/asm/special_insns.h199
-rw-r--r--arch/x86/include/asm/spinlock.h124
-rw-r--r--arch/x86/include/asm/spinlock_types.h21
-rw-r--r--arch/x86/include/asm/sta2x11.h12
-rw-r--r--arch/x86/include/asm/stackprotector.h5
-rw-r--r--arch/x86/include/asm/string.h4
-rw-r--r--arch/x86/include/asm/suspend.h4
-rw-r--r--arch/x86/include/asm/svm.h79
-rw-r--r--arch/x86/include/asm/switch_to.h129
-rw-r--r--arch/x86/include/asm/sys_ia32.h15
-rw-r--r--arch/x86/include/asm/syscall.h33
-rw-r--r--arch/x86/include/asm/syscalls.h12
-rw-r--r--arch/x86/include/asm/thread_info.h86
-rw-r--r--arch/x86/include/asm/timer.h8
-rw-r--r--arch/x86/include/asm/tlb.h9
-rw-r--r--arch/x86/include/asm/tlbflush.h64
-rw-r--r--arch/x86/include/asm/topology.h42
-rw-r--r--arch/x86/include/asm/trace_clock.h20
-rw-r--r--arch/x86/include/asm/traps.h25
-rw-r--r--arch/x86/include/asm/tsc.h6
-rw-r--r--arch/x86/include/asm/uaccess.h119
-rw-r--r--arch/x86/include/asm/uaccess_32.h25
-rw-r--r--arch/x86/include/asm/uaccess_64.h21
-rw-r--r--arch/x86/include/asm/unistd.h68
-rw-r--r--arch/x86/include/asm/uprobes.h58
-rw-r--r--arch/x86/include/asm/user.h4
-rw-r--r--arch/x86/include/asm/uv/uv.h5
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h134
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h4
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1
-rw-r--r--arch/x86/include/asm/vdso.h3
-rw-r--r--arch/x86/include/asm/vga.h6
-rw-r--r--arch/x86/include/asm/vgtod.h17
-rw-r--r--arch/x86/include/asm/virtext.h1
-rw-r--r--arch/x86/include/asm/vm86.h128
-rw-r--r--arch/x86/include/asm/vmx.h67
-rw-r--r--arch/x86/include/asm/vsyscall.h34
-rw-r--r--arch/x86/include/asm/word-at-a-time.h105
-rw-r--r--arch/x86/include/asm/x2apic.h17
-rw-r--r--arch/x86/include/asm/x86_init.h30
-rw-r--r--arch/x86/include/asm/xen/events.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h37
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h1
-rw-r--r--arch/x86/include/asm/xen/interface.h19
-rw-r--r--arch/x86/include/asm/xen/page.h10
-rw-r--r--arch/x86/include/asm/xen/swiotlb-xen.h2
-rw-r--r--arch/x86/include/asm/xor.h4
-rw-r--r--arch/x86/include/asm/xor_32.h62
-rw-r--r--arch/x86/include/asm/xor_64.h69
-rw-r--r--arch/x86/include/asm/xor_avx.h184
-rw-r--r--arch/x86/include/asm/xsave.h33
-rw-r--r--arch/x86/include/uapi/asm/Kbuild64
-rw-r--r--arch/x86/include/uapi/asm/a.out.h20
-rw-r--r--arch/x86/include/uapi/asm/auxvec.h19
-rw-r--r--arch/x86/include/uapi/asm/bitsperlong.h13
-rw-r--r--arch/x86/include/uapi/asm/boot.h10
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h139
-rw-r--r--arch/x86/include/uapi/asm/byteorder.h6
-rw-r--r--arch/x86/include/uapi/asm/debugreg.h80
-rw-r--r--arch/x86/include/uapi/asm/e820.h75
-rw-r--r--arch/x86/include/uapi/asm/errno.h1
-rw-r--r--arch/x86/include/uapi/asm/fcntl.h1
-rw-r--r--arch/x86/include/uapi/asm/hw_breakpoint.h1
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h194
-rw-r--r--arch/x86/include/uapi/asm/ioctl.h1
-rw-r--r--arch/x86/include/uapi/asm/ioctls.h1
-rw-r--r--arch/x86/include/uapi/asm/ipcbuf.h1
-rw-r--r--arch/x86/include/uapi/asm/ist.h29
-rw-r--r--arch/x86/include/uapi/asm/kvm.h346
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h100
-rw-r--r--arch/x86/include/uapi/asm/ldt.h40
-rw-r--r--arch/x86/include/uapi/asm/mce.h121
-rw-r--r--arch/x86/include/uapi/asm/mman.h11
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h1
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h525
-rw-r--r--arch/x86/include/uapi/asm/msr.h15
-rw-r--r--arch/x86/include/uapi/asm/mtrr.h117
-rw-r--r--arch/x86/include/uapi/asm/param.h1
-rw-r--r--arch/x86/include/uapi/asm/perf_regs.h33
-rw-r--r--arch/x86/include/uapi/asm/poll.h1
-rw-r--r--arch/x86/include/uapi/asm/posix_types.h9
-rw-r--r--arch/x86/include/uapi/asm/posix_types_32.h25
-rw-r--r--arch/x86/include/uapi/asm/posix_types_64.h19
-rw-r--r--arch/x86/include/uapi/asm/posix_types_x32.h19
-rw-r--r--arch/x86/include/uapi/asm/prctl.h9
-rw-r--r--arch/x86/include/uapi/asm/processor-flags.h99
-rw-r--r--arch/x86/include/uapi/asm/ptrace-abi.h87
-rw-r--r--arch/x86/include/uapi/asm/ptrace.h78
-rw-r--r--arch/x86/include/uapi/asm/resource.h1
-rw-r--r--arch/x86/include/uapi/asm/sembuf.h24
-rw-r--r--arch/x86/include/uapi/asm/setup.h1
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h1
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h221
-rw-r--r--arch/x86/include/uapi/asm/sigcontext32.h77
-rw-r--r--arch/x86/include/uapi/asm/siginfo.h16
-rw-r--r--arch/x86/include/uapi/asm/signal.h139
-rw-r--r--arch/x86/include/uapi/asm/socket.h1
-rw-r--r--arch/x86/include/uapi/asm/sockios.h1
-rw-r--r--arch/x86/include/uapi/asm/stat.h135
-rw-r--r--arch/x86/include/uapi/asm/statfs.h12
-rw-r--r--arch/x86/include/uapi/asm/svm.h132
-rw-r--r--arch/x86/include/uapi/asm/swab.h36
-rw-r--r--arch/x86/include/uapi/asm/termbits.h1
-rw-r--r--arch/x86/include/uapi/asm/termios.h1
-rw-r--r--arch/x86/include/uapi/asm/types.h6
-rw-r--r--arch/x86/include/uapi/asm/ucontext.h12
-rw-r--r--arch/x86/include/uapi/asm/unistd.h17
-rw-r--r--arch/x86/include/uapi/asm/vm86.h129
-rw-r--r--arch/x86/include/uapi/asm/vmx.h109
-rw-r--r--arch/x86/include/uapi/asm/vsyscall.h17
254 files changed, 3748 insertions, 8693 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 7f669853317..6fa90a845e4 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -1,7 +1,25 @@
1include include/asm-generic/Kbuild.asm
1 2
2 3header-y += boot.h
3genhdr-y += unistd_32.h 4header-y += bootparam.h
4genhdr-y += unistd_64.h 5header-y += debugreg.h
5genhdr-y += unistd_x32.h 6header-y += e820.h
6 7header-y += hw_breakpoint.h
7generic-y += clkdev.h 8header-y += hyperv.h
9header-y += ist.h
10header-y += ldt.h
11header-y += mce.h
12header-y += msr-index.h
13header-y += msr.h
14header-y += mtrr.h
15header-y += posix_types_32.h
16header-y += posix_types_64.h
17header-y += prctl.h
18header-y += processor-flags.h
19header-y += ptrace-abi.h
20header-y += sigcontext32.h
21header-y += ucontext.h
22header-y += unistd_32.h
23header-y += unistd_64.h
24header-y += vm86.h
25header-y += vsyscall.h
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d178..610001d385d 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,7 +29,7 @@
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/mmu.h> 30#include <asm/mmu.h>
31#include <asm/mpspec.h> 31#include <asm/mpspec.h>
32#include <asm/realmode.h> 32#include <asm/trampoline.h>
33 33
34#define COMPILER_DEPENDENT_INT64 long long 34#define COMPILER_DEPENDENT_INT64 long long
35#define COMPILER_DEPENDENT_UINT64 unsigned long long 35#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -117,8 +117,11 @@ static inline void acpi_disable_pci(void)
117/* Low-level suspend routine. */ 117/* Low-level suspend routine. */
118extern int acpi_suspend_lowlevel(void); 118extern int acpi_suspend_lowlevel(void);
119 119
120/* Physical address to resume after wakeup */ 120extern const unsigned char acpi_wakeup_code[];
121#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start)) 121#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
122
123/* early initialization routine */
124extern void acpi_reserve_wakeup_memory(void);
122 125
123/* 126/*
124 * Check if the CPU can handle C2 and deeper 127 * Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 372231c22a4..091508b533b 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,17 +1,14 @@
1#ifndef _ASM_X86_ALTERNATIVE_ASM_H
2#define _ASM_X86_ALTERNATIVE_ASM_H
3
4#ifdef __ASSEMBLY__ 1#ifdef __ASSEMBLY__
5 2
6#include <asm/asm.h> 3#include <asm/asm.h>
7 4
8#ifdef CONFIG_SMP 5#ifdef CONFIG_SMP
9 .macro LOCK_PREFIX 6 .macro LOCK_PREFIX
10672: lock 71: lock
11 .pushsection .smp_locks,"a" 8 .section .smp_locks,"a"
12 .balign 4 9 .balign 4
13 .long 672b - . 10 .long 1b - .
14 .popsection 11 .previous
15 .endm 12 .endm
16#else 13#else
17 .macro LOCK_PREFIX 14 .macro LOCK_PREFIX
@@ -27,5 +24,3 @@
27.endm 24.endm
28 25
29#endif /* __ASSEMBLY__ */ 26#endif /* __ASSEMBLY__ */
30
31#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 58ed6d96a6a..37ad100a221 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@
29 29
30#ifdef CONFIG_SMP 30#ifdef CONFIG_SMP
31#define LOCK_PREFIX_HERE \ 31#define LOCK_PREFIX_HERE \
32 ".pushsection .smp_locks,\"a\"\n" \ 32 ".section .smp_locks,\"a\"\n" \
33 ".balign 4\n" \ 33 ".balign 4\n" \
34 ".long 671f - .\n" /* offset */ \ 34 ".long 671f - .\n" /* offset */ \
35 ".popsection\n" \ 35 ".previous\n" \
36 "671:" 36 "671:"
37 37
38#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " 38#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -60,7 +60,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
60 void *locks, void *locks_end, 60 void *locks, void *locks_end,
61 void *text, void *text_end); 61 void *text, void *text_end);
62extern void alternatives_smp_module_del(struct module *mod); 62extern void alternatives_smp_module_del(struct module *mod);
63extern void alternatives_enable_smp(void); 63extern void alternatives_smp_switch(int smp);
64extern int alternatives_text_reserved(void *start, void *end); 64extern int alternatives_text_reserved(void *start, void *end);
65extern bool skip_smp_alternatives; 65extern bool skip_smp_alternatives;
66#else 66#else
@@ -68,61 +68,30 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
68 void *locks, void *locks_end, 68 void *locks, void *locks_end,
69 void *text, void *text_end) {} 69 void *text, void *text_end) {}
70static inline void alternatives_smp_module_del(struct module *mod) {} 70static inline void alternatives_smp_module_del(struct module *mod) {}
71static inline void alternatives_enable_smp(void) {} 71static inline void alternatives_smp_switch(int smp) {}
72static inline int alternatives_text_reserved(void *start, void *end) 72static inline int alternatives_text_reserved(void *start, void *end)
73{ 73{
74 return 0; 74 return 0;
75} 75}
76#endif /* CONFIG_SMP */ 76#endif /* CONFIG_SMP */
77 77
78#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
79
80#define b_replacement(number) "663"#number
81#define e_replacement(number) "664"#number
82
83#define alt_slen "662b-661b"
84#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
85
86#define ALTINSTR_ENTRY(feature, number) \
87 " .long 661b - .\n" /* label */ \
88 " .long " b_replacement(number)"f - .\n" /* new instruction */ \
89 " .word " __stringify(feature) "\n" /* feature bit */ \
90 " .byte " alt_slen "\n" /* source len */ \
91 " .byte " alt_rlen(number) "\n" /* replacement len */
92
93#define DISCARD_ENTRY(number) /* rlen <= slen */ \
94 " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
95
96#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
97 b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
98
99/* alternative assembly primitive: */ 78/* alternative assembly primitive: */
100#define ALTERNATIVE(oldinstr, newinstr, feature) \ 79#define ALTERNATIVE(oldinstr, newinstr, feature) \
101 OLDINSTR(oldinstr) \ 80 \
102 ".pushsection .altinstructions,\"a\"\n" \ 81 "661:\n\t" oldinstr "\n662:\n" \
103 ALTINSTR_ENTRY(feature, 1) \ 82 ".section .altinstructions,\"a\"\n" \
104 ".popsection\n" \ 83 " .long 661b - .\n" /* label */ \
105 ".pushsection .discard,\"aw\",@progbits\n" \ 84 " .long 663f - .\n" /* new instruction */ \
106 DISCARD_ENTRY(1) \ 85 " .word " __stringify(feature) "\n" /* feature bit */ \
107 ".popsection\n" \ 86 " .byte 662b-661b\n" /* sourcelen */ \
108 ".pushsection .altinstr_replacement, \"ax\"\n" \ 87 " .byte 664f-663f\n" /* replacementlen */ \
109 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ 88 ".previous\n" \
110 ".popsection" 89 ".section .discard,\"aw\",@progbits\n" \
111 90 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
112#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ 91 ".previous\n" \
113 OLDINSTR(oldinstr) \ 92 ".section .altinstr_replacement, \"ax\"\n" \
114 ".pushsection .altinstructions,\"a\"\n" \ 93 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
115 ALTINSTR_ENTRY(feature1, 1) \ 94 ".previous"
116 ALTINSTR_ENTRY(feature2, 2) \
117 ".popsection\n" \
118 ".pushsection .discard,\"aw\",@progbits\n" \
119 DISCARD_ENTRY(1) \
120 DISCARD_ENTRY(2) \
121 ".popsection\n" \
122 ".pushsection .altinstr_replacement, \"ax\"\n" \
123 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
124 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
125 ".popsection"
126 95
127/* 96/*
128 * This must be included *after* the definition of ALTERNATIVE due to 97 * This must be included *after* the definition of ALTERNATIVE due to
@@ -171,30 +140,11 @@ static inline int alternatives_text_reserved(void *start, void *end)
171 : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) 140 : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
172 141
173/* 142/*
174 * Like alternative_call, but there are two features and respective functions.
175 * If CPU has feature2, function2 is used.
176 * Otherwise, if CPU has feature1, function1 is used.
177 * Otherwise, old function is used.
178 */
179#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
180 output, input...) \
181 asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
182 "call %P[new2]", feature2) \
183 : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
184 [new2] "i" (newfunc2), ## input)
185
186/*
187 * use this macro(s) if you need more than one output parameter 143 * use this macro(s) if you need more than one output parameter
188 * in alternative_io 144 * in alternative_io
189 */ 145 */
190#define ASM_OUTPUT2(a...) a 146#define ASM_OUTPUT2(a...) a
191 147
192/*
193 * use this macro if you need clobbers but no inputs in
194 * alternative_{input,io,call}()
195 */
196#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
197
198struct paravirt_patch_site; 148struct paravirt_patch_site;
199#ifdef CONFIG_PARAVIRT 149#ifdef CONFIG_PARAVIRT
200void apply_paravirt(struct paravirt_patch_site *start, 150void apply_paravirt(struct paravirt_patch_site *start,
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index b3341e9cd8f..78a1eff7422 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -21,36 +21,9 @@ extern int amd_numa_init(void);
21extern int amd_get_subcaches(int); 21extern int amd_get_subcaches(int);
22extern int amd_set_subcaches(int, int); 22extern int amd_set_subcaches(int, int);
23 23
24struct amd_l3_cache {
25 unsigned indices;
26 u8 subcaches[4];
27};
28
29struct threshold_block {
30 unsigned int block;
31 unsigned int bank;
32 unsigned int cpu;
33 u32 address;
34 u16 interrupt_enable;
35 bool interrupt_capable;
36 u16 threshold_limit;
37 struct kobject kobj;
38 struct list_head miscj;
39};
40
41struct threshold_bank {
42 struct kobject *kobj;
43 struct threshold_block *blocks;
44
45 /* initialized to the number of CPUs on the node sharing this bank */
46 atomic_t cpus;
47};
48
49struct amd_northbridge { 24struct amd_northbridge {
50 struct pci_dev *misc; 25 struct pci_dev *misc;
51 struct pci_dev *link; 26 struct pci_dev *link;
52 struct amd_l3_cache l3_cache;
53 struct threshold_bank *bank4;
54}; 27};
55 28
56struct amd_northbridge_info { 29struct amd_northbridge_info {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 33880342223..9b7273cb219 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,6 +11,7 @@
11#include <linux/atomic.h> 11#include <linux/atomic.h>
12#include <asm/fixmap.h> 12#include <asm/fixmap.h>
13#include <asm/mpspec.h> 13#include <asm/mpspec.h>
14#include <asm/system.h>
14#include <asm/msr.h> 15#include <asm/msr.h>
15 16
16#define ARCH_APICTIMER_STOPS_ON_C3 1 17#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -48,7 +49,6 @@ extern unsigned int apic_verbosity;
48extern int local_apic_timer_c2_ok; 49extern int local_apic_timer_c2_ok;
49 50
50extern int disable_apic; 51extern int disable_apic;
51extern unsigned int lapic_timer_frequency;
52 52
53#ifdef CONFIG_SMP 53#ifdef CONFIG_SMP
54extern void __inquire_remote_apic(int apicid); 54extern void __inquire_remote_apic(int apicid);
@@ -138,11 +138,6 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
138 wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); 138 wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
139} 139}
140 140
141static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
142{
143 wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
144}
145
146static inline u32 native_apic_msr_read(u32 reg) 141static inline u32 native_apic_msr_read(u32 reg)
147{ 142{
148 u64 msr; 143 u64 msr;
@@ -180,7 +175,6 @@ static inline u64 native_x2apic_icr_read(void)
180} 175}
181 176
182extern int x2apic_phys; 177extern int x2apic_phys;
183extern int x2apic_preenabled;
184extern void check_x2apic(void); 178extern void check_x2apic(void);
185extern void enable_x2apic(void); 179extern void enable_x2apic(void);
186extern void x2apic_icr_write(u32 low, u32 id); 180extern void x2apic_icr_write(u32 low, u32 id);
@@ -203,9 +197,6 @@ static inline void x2apic_force_phys(void)
203 x2apic_phys = 1; 197 x2apic_phys = 1;
204} 198}
205#else 199#else
206static inline void disable_x2apic(void)
207{
208}
209static inline void check_x2apic(void) 200static inline void check_x2apic(void)
210{ 201{
211} 202}
@@ -220,7 +211,6 @@ static inline void x2apic_force_phys(void)
220{ 211{
221} 212}
222 213
223#define nox2apic 0
224#define x2apic_preenabled 0 214#define x2apic_preenabled 0
225#define x2apic_supported() 0 215#define x2apic_supported() 0
226#endif 216#endif
@@ -292,7 +282,6 @@ struct apic {
292 282
293 int (*probe)(void); 283 int (*probe)(void);
294 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); 284 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
295 int (*apic_id_valid)(int apicid);
296 int (*apic_id_registered)(void); 285 int (*apic_id_registered)(void);
297 286
298 u32 irq_delivery_mode; 287 u32 irq_delivery_mode;
@@ -306,8 +295,7 @@ struct apic {
306 unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); 295 unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
307 unsigned long (*check_apicid_present)(int apicid); 296 unsigned long (*check_apicid_present)(int apicid);
308 297
309 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask, 298 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
310 const struct cpumask *mask);
311 void (*init_apic_ldr)(void); 299 void (*init_apic_ldr)(void);
312 300
313 void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); 301 void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -332,9 +320,9 @@ struct apic {
332 unsigned long (*set_apic_id)(unsigned int id); 320 unsigned long (*set_apic_id)(unsigned int id);
333 unsigned long apic_id_mask; 321 unsigned long apic_id_mask;
334 322
335 int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, 323 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
336 const struct cpumask *andmask, 324 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
337 unsigned int *apicid); 325 const struct cpumask *andmask);
338 326
339 /* ipi */ 327 /* ipi */
340 void (*send_IPI_mask)(const struct cpumask *mask, int vector); 328 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -357,14 +345,6 @@ struct apic {
357 /* apic ops */ 345 /* apic ops */
358 u32 (*read)(u32 reg); 346 u32 (*read)(u32 reg);
359 void (*write)(u32 reg, u32 v); 347 void (*write)(u32 reg, u32 v);
360 /*
361 * ->eoi_write() has the same signature as ->write().
362 *
363 * Drivers can support both ->eoi_write() and ->write() by passing the same
364 * callback value. Kernel can override ->eoi_write() and fall back
365 * on write for EOI.
366 */
367 void (*eoi_write)(u32 reg, u32 v);
368 u64 (*icr_read)(void); 348 u64 (*icr_read)(void);
369 void (*icr_write)(u32 low, u32 high); 349 void (*icr_write)(u32 low, u32 high);
370 void (*wait_icr_idle)(void); 350 void (*wait_icr_idle)(void);
@@ -409,7 +389,7 @@ extern struct apic *apic;
409 * to enforce the order with in them. 389 * to enforce the order with in them.
410 */ 390 */
411#define apic_driver(sym) \ 391#define apic_driver(sym) \
412 static const struct apic *__apicdrivers_##sym __used \ 392 static struct apic *__apicdrivers_##sym __used \
413 __aligned(sizeof(struct apic *)) \ 393 __aligned(sizeof(struct apic *)) \
414 __section(.apicdrivers) = { &sym } 394 __section(.apicdrivers) = { &sym }
415 395
@@ -429,7 +409,6 @@ extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
429#endif 409#endif
430 410
431#ifdef CONFIG_X86_LOCAL_APIC 411#ifdef CONFIG_X86_LOCAL_APIC
432
433static inline u32 apic_read(u32 reg) 412static inline u32 apic_read(u32 reg)
434{ 413{
435 return apic->read(reg); 414 return apic->read(reg);
@@ -440,11 +419,6 @@ static inline void apic_write(u32 reg, u32 val)
440 apic->write(reg, val); 419 apic->write(reg, val);
441} 420}
442 421
443static inline void apic_eoi(void)
444{
445 apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
446}
447
448static inline u64 apic_icr_read(void) 422static inline u64 apic_icr_read(void)
449{ 423{
450 return apic->icr_read(); 424 return apic->icr_read();
@@ -465,18 +439,14 @@ static inline u32 safe_apic_wait_icr_idle(void)
465 return apic->safe_wait_icr_idle(); 439 return apic->safe_wait_icr_idle();
466} 440}
467 441
468extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
469
470#else /* CONFIG_X86_LOCAL_APIC */ 442#else /* CONFIG_X86_LOCAL_APIC */
471 443
472static inline u32 apic_read(u32 reg) { return 0; } 444static inline u32 apic_read(u32 reg) { return 0; }
473static inline void apic_write(u32 reg, u32 val) { } 445static inline void apic_write(u32 reg, u32 val) { }
474static inline void apic_eoi(void) { }
475static inline u64 apic_icr_read(void) { return 0; } 446static inline u64 apic_icr_read(void) { return 0; }
476static inline void apic_icr_write(u32 low, u32 high) { } 447static inline void apic_icr_write(u32 low, u32 high) { }
477static inline void apic_wait_icr_idle(void) { } 448static inline void apic_wait_icr_idle(void) { }
478static inline u32 safe_apic_wait_icr_idle(void) { return 0; } 449static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
479static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
480 450
481#endif /* CONFIG_X86_LOCAL_APIC */ 451#endif /* CONFIG_X86_LOCAL_APIC */
482 452
@@ -486,7 +456,9 @@ static inline void ack_APIC_irq(void)
486 * ack_APIC_irq() actually gets compiled as a single instruction 456 * ack_APIC_irq() actually gets compiled as a single instruction
487 * ... yummie. 457 * ... yummie.
488 */ 458 */
489 apic_eoi(); 459
460 /* Docs say use 0 for future compatibility */
461 apic_write(APIC_EOI, 0);
490} 462}
491 463
492static inline unsigned default_get_apic_id(unsigned long x) 464static inline unsigned default_get_apic_id(unsigned long x)
@@ -541,12 +513,7 @@ static inline const struct cpumask *default_target_cpus(void)
541#endif 513#endif
542} 514}
543 515
544static inline const struct cpumask *online_target_cpus(void) 516DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
545{
546 return cpu_online_mask;
547}
548
549DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
550 517
551 518
552static inline unsigned int read_apic_id(void) 519static inline unsigned int read_apic_id(void)
@@ -558,11 +525,6 @@ static inline unsigned int read_apic_id(void)
558 return apic->get_apic_id(reg); 525 return apic->get_apic_id(reg);
559} 526}
560 527
561static inline int default_apic_id_valid(int apicid)
562{
563 return (apicid < 255);
564}
565
566extern void default_setup_apic_routing(void); 528extern void default_setup_apic_routing(void);
567 529
568extern struct apic apic_noop; 530extern struct apic apic_noop;
@@ -595,50 +557,21 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
595 557
596#endif 558#endif
597 559
598static inline int 560static inline unsigned int
599flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 561default_cpu_mask_to_apicid(const struct cpumask *cpumask)
600 const struct cpumask *andmask,
601 unsigned int *apicid)
602{ 562{
603 unsigned long cpu_mask = cpumask_bits(cpumask)[0] & 563 return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
604 cpumask_bits(andmask)[0] &
605 cpumask_bits(cpu_online_mask)[0] &
606 APIC_ALL_CPUS;
607
608 if (likely(cpu_mask)) {
609 *apicid = (unsigned int)cpu_mask;
610 return 0;
611 } else {
612 return -EINVAL;
613 }
614} 564}
615 565
616extern int 566static inline unsigned int
617default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 567default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
618 const struct cpumask *andmask, 568 const struct cpumask *andmask)
619 unsigned int *apicid);
620
621static inline void
622flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
623 const struct cpumask *mask)
624{ 569{
625 /* Careful. Some cpus do not strictly honor the set of cpus 570 unsigned long mask1 = cpumask_bits(cpumask)[0];
626 * specified in the interrupt destination when using lowest 571 unsigned long mask2 = cpumask_bits(andmask)[0];
627 * priority interrupt delivery mode. 572 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
628 *
629 * In particular there was a hyperthreading cpu observed to
630 * deliver interrupts to the wrong hyperthread when only one
631 * hyperthread was specified in the interrupt desitination.
632 */
633 cpumask_clear(retmask);
634 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
635}
636 573
637static inline void 574 return (unsigned int)(mask1 & mask2 & mask3);
638default_vector_allocation_domain(int cpu, struct cpumask *retmask,
639 const struct cpumask *mask)
640{
641 cpumask_copy(retmask, cpumask_of(cpu));
642} 575}
643 576
644static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) 577static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
diff --git a/arch/x86/include/asm/apic_flat_64.h b/arch/x86/include/asm/apic_flat_64.h
deleted file mode 100644
index a2d31279644..00000000000
--- a/arch/x86/include/asm/apic_flat_64.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _ASM_X86_APIC_FLAT_64_H
2#define _ASM_X86_APIC_FLAT_64_H
3
4extern void flat_init_apic_ldr(void);
5
6#endif
7
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index c46bb99d5fb..34595d5e103 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -37,7 +37,7 @@
37#define APIC_ARBPRI_MASK 0xFFu 37#define APIC_ARBPRI_MASK 0xFFu
38#define APIC_PROCPRI 0xA0 38#define APIC_PROCPRI 0xA0
39#define APIC_EOI 0xB0 39#define APIC_EOI 0xB0
40#define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */ 40#define APIC_EIO_ACK 0x0
41#define APIC_RRR 0xC0 41#define APIC_RRR 0xC0
42#define APIC_LDR 0xD0 42#define APIC_LDR 0xD0
43#define APIC_LDR_MASK (0xFFu << 24) 43#define APIC_LDR_MASK (0xFFu << 24)
@@ -100,9 +100,7 @@
100#define APIC_TIMER_BASE_CLKIN 0x0 100#define APIC_TIMER_BASE_CLKIN 0x0
101#define APIC_TIMER_BASE_TMBASE 0x1 101#define APIC_TIMER_BASE_TMBASE 0x1
102#define APIC_TIMER_BASE_DIV 0x2 102#define APIC_TIMER_BASE_DIV 0x2
103#define APIC_LVT_TIMER_ONESHOT (0 << 17)
104#define APIC_LVT_TIMER_PERIODIC (1 << 17) 103#define APIC_LVT_TIMER_PERIODIC (1 << 17)
105#define APIC_LVT_TIMER_TSCDEADLINE (2 << 17)
106#define APIC_LVT_MASKED (1 << 16) 104#define APIC_LVT_MASKED (1 << 16)
107#define APIC_LVT_LEVEL_TRIGGER (1 << 15) 105#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
108#define APIC_LVT_REMOTE_IRR (1 << 14) 106#define APIC_LVT_REMOTE_IRR (1 << 14)
@@ -144,7 +142,6 @@
144 142
145#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) 143#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
146#define APIC_BASE_MSR 0x800 144#define APIC_BASE_MSR 0x800
147#define XAPIC_ENABLE (1UL << 11)
148#define X2APIC_ENABLE (1UL << 10) 145#define X2APIC_ENABLE (1UL << 10)
149 146
150#ifdef CONFIG_X86_32 147#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
deleted file mode 100644
index 0d9ec770f2f..00000000000
--- a/arch/x86/include/asm/archrandom.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * This file is part of the Linux kernel.
3 *
4 * Copyright (c) 2011, Intel Corporation
5 * Authors: Fenghua Yu <fenghua.yu@intel.com>,
6 * H. Peter Anvin <hpa@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 */
22
23#ifndef ASM_X86_ARCHRANDOM_H
24#define ASM_X86_ARCHRANDOM_H
25
26#include <asm/processor.h>
27#include <asm/cpufeature.h>
28#include <asm/alternative.h>
29#include <asm/nops.h>
30
31#define RDRAND_RETRY_LOOPS 10
32
33#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
34#ifdef CONFIG_X86_64
35# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
36#else
37# define RDRAND_LONG RDRAND_INT
38#endif
39
40#ifdef CONFIG_ARCH_RANDOM
41
42#define GET_RANDOM(name, type, rdrand, nop) \
43static inline int name(type *v) \
44{ \
45 int ok; \
46 alternative_io("movl $0, %0\n\t" \
47 nop, \
48 "\n1: " rdrand "\n\t" \
49 "jc 2f\n\t" \
50 "decl %0\n\t" \
51 "jnz 1b\n\t" \
52 "2:", \
53 X86_FEATURE_RDRAND, \
54 ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
55 "0" (RDRAND_RETRY_LOOPS)); \
56 return ok; \
57}
58
59#ifdef CONFIG_X86_64
60
61GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
62GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
63
64#else
65
66GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
67GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
68
69#endif /* CONFIG_X86_64 */
70
71#endif /* CONFIG_ARCH_RANDOM */
72
73extern void x86_init_rdrand(struct cpuinfo_x86 *c);
74
75#endif /* ASM_X86_ARCHRANDOM_H */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 1c2d247f65c..9412d6558c8 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -4,9 +4,11 @@
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
6# define __ASM_FORM_COMMA(x) x, 6# define __ASM_FORM_COMMA(x) x,
7# define __ASM_EX_SEC .section __ex_table, "a"
7#else 8#else
8# define __ASM_FORM(x) " " #x " " 9# define __ASM_FORM(x) " " #x " "
9# define __ASM_FORM_COMMA(x) " " #x "," 10# define __ASM_FORM_COMMA(x) " " #x ","
11# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
10#endif 12#endif
11 13
12#ifdef CONFIG_X86_32 14#ifdef CONFIG_X86_32
@@ -40,33 +42,17 @@
40 42
41/* Exception table entry */ 43/* Exception table entry */
42#ifdef __ASSEMBLY__ 44#ifdef __ASSEMBLY__
43# define _ASM_EXTABLE(from,to) \ 45# define _ASM_EXTABLE(from,to) \
44 .pushsection "__ex_table","a" ; \ 46 __ASM_EX_SEC ; \
45 .balign 8 ; \ 47 _ASM_ALIGN ; \
46 .long (from) - . ; \ 48 _ASM_PTR from , to ; \
47 .long (to) - . ; \ 49 .previous
48 .popsection
49
50# define _ASM_EXTABLE_EX(from,to) \
51 .pushsection "__ex_table","a" ; \
52 .balign 8 ; \
53 .long (from) - . ; \
54 .long (to) - . + 0x7ffffff0 ; \
55 .popsection
56#else 50#else
57# define _ASM_EXTABLE(from,to) \ 51# define _ASM_EXTABLE(from,to) \
58 " .pushsection \"__ex_table\",\"a\"\n" \ 52 __ASM_EX_SEC \
59 " .balign 8\n" \ 53 _ASM_ALIGN "\n" \
60 " .long (" #from ") - .\n" \ 54 _ASM_PTR #from "," #to "\n" \
61 " .long (" #to ") - .\n" \ 55 " .previous\n"
62 " .popsection\n"
63
64# define _ASM_EXTABLE_EX(from,to) \
65 " .pushsection \"__ex_table\",\"a\"\n" \
66 " .balign 8\n" \
67 " .long (" #from ") - .\n" \
68 " .long (" #to ") - . + 0x7ffffff0\n" \
69 " .popsection\n"
70#endif 56#endif
71 57
72#endif /* _ASM_X86_ASM_H */ 58#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 722aa3b0462..10572e309ab 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -172,7 +172,27 @@ static inline int atomic_add_negative(int i, atomic_t *v)
172 */ 172 */
173static inline int atomic_add_return(int i, atomic_t *v) 173static inline int atomic_add_return(int i, atomic_t *v)
174{ 174{
175 return i + xadd(&v->counter, i); 175 int __i;
176#ifdef CONFIG_M386
177 unsigned long flags;
178 if (unlikely(boot_cpu_data.x86 <= 3))
179 goto no_xadd;
180#endif
181 /* Modern 486+ processor */
182 __i = i;
183 asm volatile(LOCK_PREFIX "xaddl %0, %1"
184 : "+r" (i), "+m" (v->counter)
185 : : "memory");
186 return i + __i;
187
188#ifdef CONFIG_M386
189no_xadd: /* Legacy 386 processor */
190 raw_local_irq_save(flags);
191 __i = atomic_read(v);
192 atomic_set(v, i + __i);
193 raw_local_irq_restore(flags);
194 return i + __i;
195#endif
176} 196}
177 197
178/** 198/**
@@ -224,6 +244,30 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
224 return c; 244 return c;
225} 245}
226 246
247
248/*
249 * atomic_dec_if_positive - decrement by 1 if old value positive
250 * @v: pointer of type atomic_t
251 *
252 * The function returns the old value of *v minus 1, even if
253 * the atomic variable, v, was not decremented.
254 */
255static inline int atomic_dec_if_positive(atomic_t *v)
256{
257 int c, old, dec;
258 c = atomic_read(v);
259 for (;;) {
260 dec = c - 1;
261 if (unlikely(dec < 0))
262 break;
263 old = atomic_cmpxchg((v), c, dec);
264 if (likely(old == c))
265 break;
266 c = old;
267 }
268 return dec;
269}
270
227/** 271/**
228 * atomic_inc_short - increment of a short integer 272 * atomic_inc_short - increment of a short integer
229 * @v: pointer to type int 273 * @v: pointer to type int
@@ -269,9 +313,9 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
269#define smp_mb__after_atomic_inc() barrier() 313#define smp_mb__after_atomic_inc() barrier()
270 314
271#ifdef CONFIG_X86_32 315#ifdef CONFIG_X86_32
272# include <asm/atomic64_32.h> 316# include "atomic64_32.h"
273#else 317#else
274# include <asm/atomic64_64.h> 318# include "atomic64_64.h"
275#endif 319#endif
276 320
277#endif /* _ASM_X86_ATOMIC_H */ 321#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de75c90..24098aafce0 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,56 +14,17 @@ typedef struct {
14 14
15#define ATOMIC64_INIT(val) { (val) } 15#define ATOMIC64_INIT(val) { (val) }
16 16
17#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
18#ifndef ATOMIC64_EXPORT
19#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
20#else
21#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
22 ATOMIC64_EXPORT(atomic64_##sym)
23#endif
24
25#ifdef CONFIG_X86_CMPXCHG64 17#ifdef CONFIG_X86_CMPXCHG64
26#define __alternative_atomic64(f, g, out, in...) \ 18#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
27 asm volatile("call %P[func]" \
28 : out : [func] "i" (atomic64_##g##_cx8), ## in)
29
30#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
31#else 19#else
32#define __alternative_atomic64(f, g, out, in...) \ 20#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
33 alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
34 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
35
36#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
37 ATOMIC64_DECL_ONE(sym##_386)
38
39ATOMIC64_DECL_ONE(add_386);
40ATOMIC64_DECL_ONE(sub_386);
41ATOMIC64_DECL_ONE(inc_386);
42ATOMIC64_DECL_ONE(dec_386);
43#endif 21#endif
44 22
45#define alternative_atomic64(f, out, in...) \ 23#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
46 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
47
48ATOMIC64_DECL(read);
49ATOMIC64_DECL(set);
50ATOMIC64_DECL(xchg);
51ATOMIC64_DECL(add_return);
52ATOMIC64_DECL(sub_return);
53ATOMIC64_DECL(inc_return);
54ATOMIC64_DECL(dec_return);
55ATOMIC64_DECL(dec_if_positive);
56ATOMIC64_DECL(inc_not_zero);
57ATOMIC64_DECL(add_unless);
58
59#undef ATOMIC64_DECL
60#undef ATOMIC64_DECL_ONE
61#undef __ATOMIC64_DECL
62#undef ATOMIC64_EXPORT
63 24
64/** 25/**
65 * atomic64_cmpxchg - cmpxchg atomic64 variable 26 * atomic64_cmpxchg - cmpxchg atomic64 variable
66 * @v: pointer to type atomic64_t 27 * @p: pointer to type atomic64_t
67 * @o: expected value 28 * @o: expected value
68 * @n: new value 29 * @n: new value
69 * 30 *
@@ -89,16 +50,18 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
89 long long o; 50 long long o;
90 unsigned high = (unsigned)(n >> 32); 51 unsigned high = (unsigned)(n >> 32);
91 unsigned low = (unsigned)n; 52 unsigned low = (unsigned)n;
92 alternative_atomic64(xchg, "=&A" (o), 53 asm volatile(ATOMIC64_ALTERNATIVE(xchg)
93 "S" (v), "b" (low), "c" (high) 54 : "=A" (o), "+b" (low), "+c" (high)
94 : "memory"); 55 : "S" (v)
56 : "memory"
57 );
95 return o; 58 return o;
96} 59}
97 60
98/** 61/**
99 * atomic64_set - set atomic64 variable 62 * atomic64_set - set atomic64 variable
100 * @v: pointer to type atomic64_t 63 * @v: pointer to type atomic64_t
101 * @i: value to assign 64 * @n: value to assign
102 * 65 *
103 * Atomically sets the value of @v to @n. 66 * Atomically sets the value of @v to @n.
104 */ 67 */
@@ -106,9 +69,11 @@ static inline void atomic64_set(atomic64_t *v, long long i)
106{ 69{
107 unsigned high = (unsigned)(i >> 32); 70 unsigned high = (unsigned)(i >> 32);
108 unsigned low = (unsigned)i; 71 unsigned low = (unsigned)i;
109 alternative_atomic64(set, /* no output */, 72 asm volatile(ATOMIC64_ALTERNATIVE(set)
110 "S" (v), "b" (low), "c" (high) 73 : "+b" (low), "+c" (high)
111 : "eax", "edx", "memory"); 74 : "S" (v)
75 : "eax", "edx", "memory"
76 );
112} 77}
113 78
114/** 79/**
@@ -117,10 +82,13 @@ static inline void atomic64_set(atomic64_t *v, long long i)
117 * 82 *
118 * Atomically reads the value of @v and returns it. 83 * Atomically reads the value of @v and returns it.
119 */ 84 */
120static inline long long atomic64_read(const atomic64_t *v) 85static inline long long atomic64_read(atomic64_t *v)
121{ 86{
122 long long r; 87 long long r;
123 alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); 88 asm volatile(ATOMIC64_ALTERNATIVE(read)
89 : "=A" (r), "+c" (v)
90 : : "memory"
91 );
124 return r; 92 return r;
125 } 93 }
126 94
@@ -133,9 +101,10 @@ static inline long long atomic64_read(const atomic64_t *v)
133 */ 101 */
134static inline long long atomic64_add_return(long long i, atomic64_t *v) 102static inline long long atomic64_add_return(long long i, atomic64_t *v)
135{ 103{
136 alternative_atomic64(add_return, 104 asm volatile(ATOMIC64_ALTERNATIVE(add_return)
137 ASM_OUTPUT2("+A" (i), "+c" (v)), 105 : "+A" (i), "+c" (v)
138 ASM_NO_INPUT_CLOBBER("memory")); 106 : : "memory"
107 );
139 return i; 108 return i;
140} 109}
141 110
@@ -144,25 +113,32 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
144 */ 113 */
145static inline long long atomic64_sub_return(long long i, atomic64_t *v) 114static inline long long atomic64_sub_return(long long i, atomic64_t *v)
146{ 115{
147 alternative_atomic64(sub_return, 116 asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
148 ASM_OUTPUT2("+A" (i), "+c" (v)), 117 : "+A" (i), "+c" (v)
149 ASM_NO_INPUT_CLOBBER("memory")); 118 : : "memory"
119 );
150 return i; 120 return i;
151} 121}
152 122
153static inline long long atomic64_inc_return(atomic64_t *v) 123static inline long long atomic64_inc_return(atomic64_t *v)
154{ 124{
155 long long a; 125 long long a;
156 alternative_atomic64(inc_return, "=&A" (a), 126 asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
157 "S" (v) : "memory", "ecx"); 127 : "=A" (a)
128 : "S" (v)
129 : "memory", "ecx"
130 );
158 return a; 131 return a;
159} 132}
160 133
161static inline long long atomic64_dec_return(atomic64_t *v) 134static inline long long atomic64_dec_return(atomic64_t *v)
162{ 135{
163 long long a; 136 long long a;
164 alternative_atomic64(dec_return, "=&A" (a), 137 asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
165 "S" (v) : "memory", "ecx"); 138 : "=A" (a)
139 : "S" (v)
140 : "memory", "ecx"
141 );
166 return a; 142 return a;
167} 143}
168 144
@@ -175,9 +151,10 @@ static inline long long atomic64_dec_return(atomic64_t *v)
175 */ 151 */
176static inline long long atomic64_add(long long i, atomic64_t *v) 152static inline long long atomic64_add(long long i, atomic64_t *v)
177{ 153{
178 __alternative_atomic64(add, add_return, 154 asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
179 ASM_OUTPUT2("+A" (i), "+c" (v)), 155 : "+A" (i), "+c" (v)
180 ASM_NO_INPUT_CLOBBER("memory")); 156 : : "memory"
157 );
181 return i; 158 return i;
182} 159}
183 160
@@ -190,9 +167,10 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
190 */ 167 */
191static inline long long atomic64_sub(long long i, atomic64_t *v) 168static inline long long atomic64_sub(long long i, atomic64_t *v)
192{ 169{
193 __alternative_atomic64(sub, sub_return, 170 asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
194 ASM_OUTPUT2("+A" (i), "+c" (v)), 171 : "+A" (i), "+c" (v)
195 ASM_NO_INPUT_CLOBBER("memory")); 172 : : "memory"
173 );
196 return i; 174 return i;
197} 175}
198 176
@@ -200,7 +178,7 @@ static inline long long atomic64_sub(long long i, atomic64_t *v)
200 * atomic64_sub_and_test - subtract value from variable and test result 178 * atomic64_sub_and_test - subtract value from variable and test result
201 * @i: integer value to subtract 179 * @i: integer value to subtract
202 * @v: pointer to type atomic64_t 180 * @v: pointer to type atomic64_t
203 * 181 *
204 * Atomically subtracts @i from @v and returns 182 * Atomically subtracts @i from @v and returns
205 * true if the result is zero, or false for all 183 * true if the result is zero, or false for all
206 * other cases. 184 * other cases.
@@ -218,20 +196,24 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
218 */ 196 */
219static inline void atomic64_inc(atomic64_t *v) 197static inline void atomic64_inc(atomic64_t *v)
220{ 198{
221 __alternative_atomic64(inc, inc_return, /* no output */, 199 asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
222 "S" (v) : "memory", "eax", "ecx", "edx"); 200 : : "S" (v)
201 : "memory", "eax", "ecx", "edx"
202 );
223} 203}
224 204
225/** 205/**
226 * atomic64_dec - decrement atomic64 variable 206 * atomic64_dec - decrement atomic64 variable
227 * @v: pointer to type atomic64_t 207 * @ptr: pointer to type atomic64_t
228 * 208 *
229 * Atomically decrements @v by 1. 209 * Atomically decrements @ptr by 1.
230 */ 210 */
231static inline void atomic64_dec(atomic64_t *v) 211static inline void atomic64_dec(atomic64_t *v)
232{ 212{
233 __alternative_atomic64(dec, dec_return, /* no output */, 213 asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
234 "S" (v) : "memory", "eax", "ecx", "edx"); 214 : : "S" (v)
215 : "memory", "eax", "ecx", "edx"
216 );
235} 217}
236 218
237/** 219/**
@@ -281,15 +263,15 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
281 * @u: ...unless v is equal to u. 263 * @u: ...unless v is equal to u.
282 * 264 *
283 * Atomically adds @a to @v, so long as it was not @u. 265 * Atomically adds @a to @v, so long as it was not @u.
284 * Returns non-zero if the add was done, zero otherwise. 266 * Returns the old value of @v.
285 */ 267 */
286static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) 268static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
287{ 269{
288 unsigned low = (unsigned)u; 270 unsigned low = (unsigned)u;
289 unsigned high = (unsigned)(u >> 32); 271 unsigned high = (unsigned)(u >> 32);
290 alternative_atomic64(add_unless, 272 asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
291 ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)), 273 : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
292 "S" (v) : "memory"); 274 : : "memory");
293 return (int)a; 275 return (int)a;
294} 276}
295 277
@@ -297,20 +279,26 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
297static inline int atomic64_inc_not_zero(atomic64_t *v) 279static inline int atomic64_inc_not_zero(atomic64_t *v)
298{ 280{
299 int r; 281 int r;
300 alternative_atomic64(inc_not_zero, "=&a" (r), 282 asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
301 "S" (v) : "ecx", "edx", "memory"); 283 : "=a" (r)
284 : "S" (v)
285 : "ecx", "edx", "memory"
286 );
302 return r; 287 return r;
303} 288}
304 289
305static inline long long atomic64_dec_if_positive(atomic64_t *v) 290static inline long long atomic64_dec_if_positive(atomic64_t *v)
306{ 291{
307 long long r; 292 long long r;
308 alternative_atomic64(dec_if_positive, "=&A" (r), 293 asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
309 "S" (v) : "ecx", "memory"); 294 : "=A" (r)
295 : "S" (v)
296 : "ecx", "memory"
297 );
310 return r; 298 return r;
311} 299}
312 300
313#undef alternative_atomic64 301#undef ATOMIC64_ALTERNATIVE
314#undef __alternative_atomic64 302#undef ATOMIC64_ALTERNATIVE_
315 303
316#endif /* _ASM_X86_ATOMIC64_32_H */ 304#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc8ee0..017594d403f 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -170,7 +170,11 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
170 */ 170 */
171static inline long atomic64_add_return(long i, atomic64_t *v) 171static inline long atomic64_add_return(long i, atomic64_t *v)
172{ 172{
173 return i + xadd(&v->counter, i); 173 long __i = i;
174 asm volatile(LOCK_PREFIX "xaddq %0, %1;"
175 : "+r" (i), "+m" (v->counter)
176 : : "memory");
177 return i + __i;
174} 178}
175 179
176static inline long atomic64_sub_return(long i, atomic64_t *v) 180static inline long atomic64_sub_return(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
deleted file mode 100644
index c6cd358a1ee..00000000000
--- a/arch/x86/include/asm/barrier.h
+++ /dev/null
@@ -1,116 +0,0 @@
1#ifndef _ASM_X86_BARRIER_H
2#define _ASM_X86_BARRIER_H
3
4#include <asm/alternative.h>
5#include <asm/nops.h>
6
7/*
8 * Force strict CPU ordering.
9 * And yes, this is required on UP too when we're talking
10 * to devices.
11 */
12
13#ifdef CONFIG_X86_32
14/*
15 * Some non-Intel clones support out of order store. wmb() ceases to be a
16 * nop for these.
17 */
18#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
19#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
20#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
21#else
22#define mb() asm volatile("mfence":::"memory")
23#define rmb() asm volatile("lfence":::"memory")
24#define wmb() asm volatile("sfence" ::: "memory")
25#endif
26
27/**
28 * read_barrier_depends - Flush all pending reads that subsequents reads
29 * depend on.
30 *
31 * No data-dependent reads from memory-like regions are ever reordered
32 * over this barrier. All reads preceding this primitive are guaranteed
33 * to access memory (but not necessarily other CPUs' caches) before any
34 * reads following this primitive that depend on the data return by
35 * any of the preceding reads. This primitive is much lighter weight than
36 * rmb() on most CPUs, and is never heavier weight than is
37 * rmb().
38 *
39 * These ordering constraints are respected by both the local CPU
40 * and the compiler.
41 *
42 * Ordering is not guaranteed by anything other than these primitives,
43 * not even by data dependencies. See the documentation for
44 * memory_barrier() for examples and URLs to more information.
45 *
46 * For example, the following code would force ordering (the initial
47 * value of "a" is zero, "b" is one, and "p" is "&a"):
48 *
49 * <programlisting>
50 * CPU 0 CPU 1
51 *
52 * b = 2;
53 * memory_barrier();
54 * p = &b; q = p;
55 * read_barrier_depends();
56 * d = *q;
57 * </programlisting>
58 *
59 * because the read of "*q" depends on the read of "p" and these
60 * two reads are separated by a read_barrier_depends(). However,
61 * the following code, with the same initial values for "a" and "b":
62 *
63 * <programlisting>
64 * CPU 0 CPU 1
65 *
66 * a = 2;
67 * memory_barrier();
68 * b = 3; y = b;
69 * read_barrier_depends();
70 * x = a;
71 * </programlisting>
72 *
73 * does not enforce ordering, since there is no data dependency between
74 * the read of "a" and the read of "b". Therefore, on some CPUs, such
75 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
76 * in cases like this where there are no data dependencies.
77 **/
78
79#define read_barrier_depends() do { } while (0)
80
81#ifdef CONFIG_SMP
82#define smp_mb() mb()
83#ifdef CONFIG_X86_PPRO_FENCE
84# define smp_rmb() rmb()
85#else
86# define smp_rmb() barrier()
87#endif
88#ifdef CONFIG_X86_OOSTORE
89# define smp_wmb() wmb()
90#else
91# define smp_wmb() barrier()
92#endif
93#define smp_read_barrier_depends() read_barrier_depends()
94#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
95#else
96#define smp_mb() barrier()
97#define smp_rmb() barrier()
98#define smp_wmb() barrier()
99#define smp_read_barrier_depends() do { } while (0)
100#define set_mb(var, value) do { var = value; barrier(); } while (0)
101#endif
102
103/*
104 * Stop RDTSC speculation. This is needed when you need to use RDTSC
105 * (or get_cycles or vread that possibly accesses the TSC) in a defined
106 * code region.
107 *
108 * (Could use an alternative three way for this if there was one.)
109 */
110static __always_inline void rdtsc_barrier(void)
111{
112 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
113 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
114}
115
116#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6dfd0195bb5..1775d6e5920 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,8 +15,6 @@
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <asm/alternative.h> 16#include <asm/alternative.h>
17 17
18#define BIT_64(n) (U64_C(1) << (n))
19
20/* 18/*
21 * These have to be done with inline assembly: that way the bit-setting 19 * These have to be done with inline assembly: that way the bit-setting
22 * is guaranteed to be atomic. All bit operations return 0 if the bit 20 * is guaranteed to be atomic. All bit operations return 0 if the bit
@@ -264,13 +262,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
264 * This operation is non-atomic and can be reordered. 262 * This operation is non-atomic and can be reordered.
265 * If two examples of this operation race, one can appear to succeed 263 * If two examples of this operation race, one can appear to succeed
266 * but actually fail. You must protect multiple accesses with a lock. 264 * but actually fail. You must protect multiple accesses with a lock.
267 *
268 * Note: the operation is performed atomically with respect to
269 * the local CPU, but not other CPUs. Portable code should not
270 * rely on this behaviour.
271 * KVM relies on this behaviour on x86 for modifying memory that is also
272 * accessed from a hypervisor on the same CPU if running in a VM: don't change
273 * this without also updating arch/x86/kernel/kvm.c
274 */ 265 */
275static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) 266static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
276{ 267{
@@ -355,7 +346,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
355 */ 346 */
356static inline unsigned long __ffs(unsigned long word) 347static inline unsigned long __ffs(unsigned long word)
357{ 348{
358 asm("rep; bsf %1,%0" 349 asm("bsf %1,%0"
359 : "=r" (word) 350 : "=r" (word)
360 : "rm" (word)); 351 : "rm" (word));
361 return word; 352 return word;
@@ -369,7 +360,7 @@ static inline unsigned long __ffs(unsigned long word)
369 */ 360 */
370static inline unsigned long ffz(unsigned long word) 361static inline unsigned long ffz(unsigned long word)
371{ 362{
372 asm("rep; bsf %1,%0" 363 asm("bsf %1,%0"
373 : "=r" (word) 364 : "=r" (word)
374 : "r" (~word)); 365 : "r" (~word));
375 return word; 366 return word;
@@ -389,8 +380,6 @@ static inline unsigned long __fls(unsigned long word)
389 return word; 380 return word;
390} 381}
391 382
392#undef ADDR
393
394#ifdef __KERNEL__ 383#ifdef __KERNEL__
395/** 384/**
396 * ffs - find first set bit in word 385 * ffs - find first set bit in word
@@ -406,24 +395,10 @@ static inline unsigned long __fls(unsigned long word)
406static inline int ffs(int x) 395static inline int ffs(int x)
407{ 396{
408 int r; 397 int r;
409 398#ifdef CONFIG_X86_CMOV
410#ifdef CONFIG_X86_64
411 /*
412 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
413 * dest reg is undefined if x==0, but their CPU architect says its
414 * value is written to set it to the same as before, except that the
415 * top 32 bits will be cleared.
416 *
417 * We cannot do this on 32 bits because at the very least some
418 * 486 CPUs did not behave this way.
419 */
420 asm("bsfl %1,%0"
421 : "=r" (r)
422 : "rm" (x), "0" (-1));
423#elif defined(CONFIG_X86_CMOV)
424 asm("bsfl %1,%0\n\t" 399 asm("bsfl %1,%0\n\t"
425 "cmovzl %2,%0" 400 "cmovzl %2,%0"
426 : "=&r" (r) : "rm" (x), "r" (-1)); 401 : "=r" (r) : "rm" (x), "r" (-1));
427#else 402#else
428 asm("bsfl %1,%0\n\t" 403 asm("bsfl %1,%0\n\t"
429 "jnz 1f\n\t" 404 "jnz 1f\n\t"
@@ -447,21 +422,7 @@ static inline int ffs(int x)
447static inline int fls(int x) 422static inline int fls(int x)
448{ 423{
449 int r; 424 int r;
450 425#ifdef CONFIG_X86_CMOV
451#ifdef CONFIG_X86_64
452 /*
453 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
454 * dest reg is undefined if x==0, but their CPU architect says its
455 * value is written to set it to the same as before, except that the
456 * top 32 bits will be cleared.
457 *
458 * We cannot do this on 32 bits because at the very least some
459 * 486 CPUs did not behave this way.
460 */
461 asm("bsrl %1,%0"
462 : "=r" (r)
463 : "rm" (x), "0" (-1));
464#elif defined(CONFIG_X86_CMOV)
465 asm("bsrl %1,%0\n\t" 426 asm("bsrl %1,%0\n\t"
466 "cmovzl %2,%0" 427 "cmovzl %2,%0"
467 : "=&r" (r) : "rm" (x), "rm" (-1)); 428 : "=&r" (r) : "rm" (x), "rm" (-1));
@@ -473,35 +434,11 @@ static inline int fls(int x)
473#endif 434#endif
474 return r + 1; 435 return r + 1;
475} 436}
437#endif /* __KERNEL__ */
476 438
477/** 439#undef ADDR
478 * fls64 - find last set bit in a 64-bit word 440
479 * @x: the word to search 441#ifdef __KERNEL__
480 *
481 * This is defined in a similar way as the libc and compiler builtin
482 * ffsll, but returns the position of the most significant set bit.
483 *
484 * fls64(value) returns 0 if value is 0 or the position of the last
485 * set bit if value is nonzero. The last (most significant) bit is
486 * at position 64.
487 */
488#ifdef CONFIG_X86_64
489static __always_inline int fls64(__u64 x)
490{
491 int bitpos = -1;
492 /*
493 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
494 * dest reg is undefined if x==0, but their CPU architect says its
495 * value is written to set it to the same as before.
496 */
497 asm("bsrq %1,%q0"
498 : "+r" (bitpos)
499 : "rm" (x));
500 return bitpos + 1;
501}
502#else
503#include <asm-generic/bitops/fls64.h>
504#endif
505 442
506#include <asm-generic/bitops/find.h> 443#include <asm-generic/bitops/find.h>
507 444
@@ -513,6 +450,12 @@ static __always_inline int fls64(__u64 x)
513 450
514#include <asm-generic/bitops/const_hweight.h> 451#include <asm-generic/bitops/const_hweight.h>
515 452
453#endif /* __KERNEL__ */
454
455#include <asm-generic/bitops/fls64.h>
456
457#ifdef __KERNEL__
458
516#include <asm-generic/bitops/le.h> 459#include <asm-generic/bitops/le.h>
517 460
518#include <asm-generic/bitops/ext2-atomic-setbit.h> 461#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 4fa687a47a6..5e1a2eef3e7 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,9 +1,14 @@
1#ifndef _ASM_X86_BOOT_H 1#ifndef _ASM_X86_BOOT_H
2#define _ASM_X86_BOOT_H 2#define _ASM_X86_BOOT_H
3 3
4/* Internal svga startup constants */
5#define NORMAL_VGA 0xffff /* 80x25 mode */
6#define EXTENDED_VGA 0xfffe /* 80x50 mode */
7#define ASK_VGA 0xfffd /* ask for it at bootup */
8
9#ifdef __KERNEL__
4 10
5#include <asm/pgtable_types.h> 11#include <asm/pgtable_types.h>
6#include <uapi/asm/boot.h>
7 12
8/* Physical address where kernel should be loaded. */ 13/* Physical address where kernel should be loaded. */
9#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 14#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -14,7 +19,7 @@
14#ifdef CONFIG_X86_64 19#ifdef CONFIG_X86_64
15#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT 20#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16#else 21#else
17#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER) 22#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER)
18#endif 23#endif
19#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) 24#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
20 25
@@ -37,4 +42,6 @@
37#define BOOT_STACK_SIZE 0x1000 42#define BOOT_STACK_SIZE 0x1000
38#endif 43#endif
39 44
45#endif /* __KERNEL__ */
46
40#endif /* _ASM_X86_BOOT_H */ 47#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 11e1152222d..f654d1bb17f 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -36,8 +36,4 @@ do { \
36#endif /* !CONFIG_BUG */ 36#endif /* !CONFIG_BUG */
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39
40
41extern void show_regs_common(void);
42
43#endif /* _ASM_X86_BUG_H */ 39#endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9863ee3747d..4e12668711e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -3,7 +3,6 @@
3 3
4/* Caches aren't brain-dead on the intel. */ 4/* Caches aren't brain-dead on the intel. */
5#include <asm-generic/cacheflush.h> 5#include <asm-generic/cacheflush.h>
6#include <asm/special_insns.h>
7 6
8#ifdef CONFIG_X86_PAT 7#ifdef CONFIG_X86_PAT
9/* 8/*
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 0fa67503391..a9e3a740f69 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,39 +46,41 @@ For 32-bit we have the following conventions - kernel is built with
46 46
47*/ 47*/
48 48
49#include <asm/dwarf2.h> 49#include "dwarf2.h"
50 50
51/* 51/*
52 * 64-bit system call stack frame layout defines and helpers, 52 * 64-bit system call stack frame layout defines and helpers, for
53 * for assembly code: 53 * assembly code (note that the seemingly unnecessary parentheses
54 * are to prevent cpp from inserting spaces in expressions that get
55 * passed to macros):
54 */ 56 */
55 57
56#define R15 0 58#define R15 (0)
57#define R14 8 59#define R14 (8)
58#define R13 16 60#define R13 (16)
59#define R12 24 61#define R12 (24)
60#define RBP 32 62#define RBP (32)
61#define RBX 40 63#define RBX (40)
62 64
63/* arguments: interrupts/non tracing syscalls only save up to here: */ 65/* arguments: interrupts/non tracing syscalls only save up to here: */
64#define R11 48 66#define R11 (48)
65#define R10 56 67#define R10 (56)
66#define R9 64 68#define R9 (64)
67#define R8 72 69#define R8 (72)
68#define RAX 80 70#define RAX (80)
69#define RCX 88 71#define RCX (88)
70#define RDX 96 72#define RDX (96)
71#define RSI 104 73#define RSI (104)
72#define RDI 112 74#define RDI (112)
73#define ORIG_RAX 120 /* + error_code */ 75#define ORIG_RAX (120) /* + error_code */
74/* end of arguments */ 76/* end of arguments */
75 77
76/* cpu exception frame or undefined in case of fast syscall: */ 78/* cpu exception frame or undefined in case of fast syscall: */
77#define RIP 128 79#define RIP (128)
78#define CS 136 80#define CS (136)
79#define EFLAGS 144 81#define EFLAGS (144)
80#define RSP 152 82#define RSP (152)
81#define SS 160 83#define SS (160)
82 84
83#define ARGOFFSET R11 85#define ARGOFFSET R11
84#define SWFRAME ORIG_RAX 86#define SWFRAME ORIG_RAX
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 5f5bb0f9736..848850fd7d6 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/checksum_32.h> 2# include "checksum_32.h"
3#else 3#else
4# include <asm/checksum_64.h> 4# include "checksum_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index 16a57f4ed64..0bdbbb3b9ce 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -8,7 +8,6 @@
8#define VCLOCK_NONE 0 /* No vDSO clock available. */ 8#define VCLOCK_NONE 0 /* No vDSO clock available. */
9#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ 9#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
10#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */ 10#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
11#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
12 11
13struct arch_clocksource_data { 12struct arch_clocksource_data {
14 int vclock_mode; 13 int vclock_mode;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 8d871eaddb6..a460fa088d4 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -1,233 +1,5 @@
1#ifndef ASM_X86_CMPXCHG_H
2#define ASM_X86_CMPXCHG_H
3
4#include <linux/compiler.h>
5#include <asm/alternative.h> /* Provides LOCK_PREFIX */
6
7/*
 8 * Non-existent functions to indicate usage errors at link time
 9 * (or compile-time if the compiler implements __compiletime_error()).
10 */
11extern void __xchg_wrong_size(void)
12 __compiletime_error("Bad argument size for xchg");
13extern void __cmpxchg_wrong_size(void)
14 __compiletime_error("Bad argument size for cmpxchg");
15extern void __xadd_wrong_size(void)
16 __compiletime_error("Bad argument size for xadd");
17extern void __add_wrong_size(void)
18 __compiletime_error("Bad argument size for add");
19
20/*
 21 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
22 * -1 because sizeof will never return -1, thereby making those switch
 23 * case statements guaranteed dead code which the compiler will
24 * eliminate, and allowing the "missing symbol in the default case" to
25 * indicate a usage error.
26 */
27#define __X86_CASE_B 1
28#define __X86_CASE_W 2
29#define __X86_CASE_L 4
30#ifdef CONFIG_64BIT
31#define __X86_CASE_Q 8
32#else
33#define __X86_CASE_Q -1 /* sizeof will never return -1 */
34#endif
35
36/*
37 * An exchange-type operation, which takes a value and a pointer, and
 38 * returns the old value.
39 */
40#define __xchg_op(ptr, arg, op, lock) \
41 ({ \
42 __typeof__ (*(ptr)) __ret = (arg); \
43 switch (sizeof(*(ptr))) { \
44 case __X86_CASE_B: \
45 asm volatile (lock #op "b %b0, %1\n" \
46 : "+q" (__ret), "+m" (*(ptr)) \
47 : : "memory", "cc"); \
48 break; \
49 case __X86_CASE_W: \
50 asm volatile (lock #op "w %w0, %1\n" \
51 : "+r" (__ret), "+m" (*(ptr)) \
52 : : "memory", "cc"); \
53 break; \
54 case __X86_CASE_L: \
55 asm volatile (lock #op "l %0, %1\n" \
56 : "+r" (__ret), "+m" (*(ptr)) \
57 : : "memory", "cc"); \
58 break; \
59 case __X86_CASE_Q: \
60 asm volatile (lock #op "q %q0, %1\n" \
61 : "+r" (__ret), "+m" (*(ptr)) \
62 : : "memory", "cc"); \
63 break; \
64 default: \
65 __ ## op ## _wrong_size(); \
66 } \
67 __ret; \
68 })
69
70/*
71 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
72 * Since this is generally used to protect other memory information, we
73 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
74 * information around.
75 */
76#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
77
78/*
79 * Atomic compare and exchange. Compare OLD with MEM, if identical,
80 * store NEW in MEM. Return the initial value in MEM. Success is
81 * indicated by comparing RETURN with OLD.
82 */
83#define __raw_cmpxchg(ptr, old, new, size, lock) \
84({ \
85 __typeof__(*(ptr)) __ret; \
86 __typeof__(*(ptr)) __old = (old); \
87 __typeof__(*(ptr)) __new = (new); \
88 switch (size) { \
89 case __X86_CASE_B: \
90 { \
91 volatile u8 *__ptr = (volatile u8 *)(ptr); \
92 asm volatile(lock "cmpxchgb %2,%1" \
93 : "=a" (__ret), "+m" (*__ptr) \
94 : "q" (__new), "0" (__old) \
95 : "memory"); \
96 break; \
97 } \
98 case __X86_CASE_W: \
99 { \
100 volatile u16 *__ptr = (volatile u16 *)(ptr); \
101 asm volatile(lock "cmpxchgw %2,%1" \
102 : "=a" (__ret), "+m" (*__ptr) \
103 : "r" (__new), "0" (__old) \
104 : "memory"); \
105 break; \
106 } \
107 case __X86_CASE_L: \
108 { \
109 volatile u32 *__ptr = (volatile u32 *)(ptr); \
110 asm volatile(lock "cmpxchgl %2,%1" \
111 : "=a" (__ret), "+m" (*__ptr) \
112 : "r" (__new), "0" (__old) \
113 : "memory"); \
114 break; \
115 } \
116 case __X86_CASE_Q: \
117 { \
118 volatile u64 *__ptr = (volatile u64 *)(ptr); \
119 asm volatile(lock "cmpxchgq %2,%1" \
120 : "=a" (__ret), "+m" (*__ptr) \
121 : "r" (__new), "0" (__old) \
122 : "memory"); \
123 break; \
124 } \
125 default: \
126 __cmpxchg_wrong_size(); \
127 } \
128 __ret; \
129})
130
131#define __cmpxchg(ptr, old, new, size) \
132 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
133
134#define __sync_cmpxchg(ptr, old, new, size) \
135 __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
136
137#define __cmpxchg_local(ptr, old, new, size) \
138 __raw_cmpxchg((ptr), (old), (new), (size), "")
139
140#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
141# include <asm/cmpxchg_32.h> 2# include "cmpxchg_32.h"
142#else 3#else
143# include <asm/cmpxchg_64.h> 4# include "cmpxchg_64.h"
144#endif
145
146#ifdef __HAVE_ARCH_CMPXCHG
147#define cmpxchg(ptr, old, new) \
148 __cmpxchg(ptr, old, new, sizeof(*(ptr)))
149
150#define sync_cmpxchg(ptr, old, new) \
151 __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
152
153#define cmpxchg_local(ptr, old, new) \
154 __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
155#endif 5#endif
156
157/*
158 * xadd() adds "inc" to "*ptr" and atomically returns the previous
159 * value of "*ptr".
160 *
161 * xadd() is locked when multiple CPUs are online
162 * xadd_sync() is always locked
163 * xadd_local() is never locked
164 */
165#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
166#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
167#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
168#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
169
170#define __add(ptr, inc, lock) \
171 ({ \
172 __typeof__ (*(ptr)) __ret = (inc); \
173 switch (sizeof(*(ptr))) { \
174 case __X86_CASE_B: \
175 asm volatile (lock "addb %b1, %0\n" \
176 : "+m" (*(ptr)) : "qi" (inc) \
177 : "memory", "cc"); \
178 break; \
179 case __X86_CASE_W: \
180 asm volatile (lock "addw %w1, %0\n" \
181 : "+m" (*(ptr)) : "ri" (inc) \
182 : "memory", "cc"); \
183 break; \
184 case __X86_CASE_L: \
185 asm volatile (lock "addl %1, %0\n" \
186 : "+m" (*(ptr)) : "ri" (inc) \
187 : "memory", "cc"); \
188 break; \
189 case __X86_CASE_Q: \
190 asm volatile (lock "addq %1, %0\n" \
191 : "+m" (*(ptr)) : "ri" (inc) \
192 : "memory", "cc"); \
193 break; \
194 default: \
195 __add_wrong_size(); \
196 } \
197 __ret; \
198 })
199
200/*
201 * add_*() adds "inc" to "*ptr"
202 *
203 * __add() takes a lock prefix
204 * add_smp() is locked when multiple CPUs are online
205 * add_sync() is always locked
206 */
207#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
208#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
209
210#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
211({ \
212 bool __ret; \
213 __typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \
214 __typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \
215 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
216 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
217 VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
218 VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
219 asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
220 : "=a" (__ret), "+d" (__old2), \
221 "+m" (*(p1)), "+m" (*(p2)) \
222 : "i" (2 * sizeof(long)), "a" (__old1), \
223 "b" (__new1), "c" (__new2)); \
224 __ret; \
225})
226
227#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
228 __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
229
230#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
231 __cmpxchg_double(, p1, p2, o1, o2, n1, n2)
232
233#endif /* ASM_X86_CMPXCHG_H */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f8bf2eecab8..3deb7250624 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -1,11 +1,61 @@
1#ifndef _ASM_X86_CMPXCHG_32_H 1#ifndef _ASM_X86_CMPXCHG_32_H
2#define _ASM_X86_CMPXCHG_32_H 2#define _ASM_X86_CMPXCHG_32_H
3 3
4#include <linux/bitops.h> /* for LOCK_PREFIX */
5
4/* 6/*
5 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you 7 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
6 * you need to test for the feature in boot_cpu_data. 8 * you need to test for the feature in boot_cpu_data.
7 */ 9 */
8 10
11extern void __xchg_wrong_size(void);
12
13/*
14 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
15 * Since this is generally used to protect other memory information, we
16 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
17 * information around.
18 */
19#define __xchg(x, ptr, size) \
20({ \
21 __typeof(*(ptr)) __x = (x); \
22 switch (size) { \
23 case 1: \
24 { \
25 volatile u8 *__ptr = (volatile u8 *)(ptr); \
26 asm volatile("xchgb %0,%1" \
27 : "=q" (__x), "+m" (*__ptr) \
28 : "0" (__x) \
29 : "memory"); \
30 break; \
31 } \
32 case 2: \
33 { \
34 volatile u16 *__ptr = (volatile u16 *)(ptr); \
35 asm volatile("xchgw %0,%1" \
36 : "=r" (__x), "+m" (*__ptr) \
37 : "0" (__x) \
38 : "memory"); \
39 break; \
40 } \
41 case 4: \
42 { \
43 volatile u32 *__ptr = (volatile u32 *)(ptr); \
44 asm volatile("xchgl %0,%1" \
45 : "=r" (__x), "+m" (*__ptr) \
46 : "0" (__x) \
47 : "memory"); \
48 break; \
49 } \
50 default: \
51 __xchg_wrong_size(); \
52 } \
53 __x; \
54})
55
56#define xchg(ptr, v) \
57 __xchg((v), (ptr), sizeof(*ptr))
58
9/* 59/*
10 * CMPXCHG8B only writes to the target if we had the previous 60 * CMPXCHG8B only writes to the target if we had the previous
11 * value in registers, otherwise it acts as a read and gives us the 61 * value in registers, otherwise it acts as a read and gives us the
@@ -34,8 +84,74 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
34 : "memory"); 84 : "memory");
35} 85}
36 86
87extern void __cmpxchg_wrong_size(void);
88
89/*
90 * Atomic compare and exchange. Compare OLD with MEM, if identical,
91 * store NEW in MEM. Return the initial value in MEM. Success is
92 * indicated by comparing RETURN with OLD.
93 */
94#define __raw_cmpxchg(ptr, old, new, size, lock) \
95({ \
96 __typeof__(*(ptr)) __ret; \
97 __typeof__(*(ptr)) __old = (old); \
98 __typeof__(*(ptr)) __new = (new); \
99 switch (size) { \
100 case 1: \
101 { \
102 volatile u8 *__ptr = (volatile u8 *)(ptr); \
103 asm volatile(lock "cmpxchgb %2,%1" \
104 : "=a" (__ret), "+m" (*__ptr) \
105 : "q" (__new), "0" (__old) \
106 : "memory"); \
107 break; \
108 } \
109 case 2: \
110 { \
111 volatile u16 *__ptr = (volatile u16 *)(ptr); \
112 asm volatile(lock "cmpxchgw %2,%1" \
113 : "=a" (__ret), "+m" (*__ptr) \
114 : "r" (__new), "0" (__old) \
115 : "memory"); \
116 break; \
117 } \
118 case 4: \
119 { \
120 volatile u32 *__ptr = (volatile u32 *)(ptr); \
121 asm volatile(lock "cmpxchgl %2,%1" \
122 : "=a" (__ret), "+m" (*__ptr) \
123 : "r" (__new), "0" (__old) \
124 : "memory"); \
125 break; \
126 } \
127 default: \
128 __cmpxchg_wrong_size(); \
129 } \
130 __ret; \
131})
132
133#define __cmpxchg(ptr, old, new, size) \
134 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
135
136#define __sync_cmpxchg(ptr, old, new, size) \
137 __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
138
139#define __cmpxchg_local(ptr, old, new, size) \
140 __raw_cmpxchg((ptr), (old), (new), (size), "")
141
142#ifdef CONFIG_X86_CMPXCHG
37#define __HAVE_ARCH_CMPXCHG 1 143#define __HAVE_ARCH_CMPXCHG 1
38 144
145#define cmpxchg(ptr, old, new) \
146 __cmpxchg((ptr), (old), (new), sizeof(*ptr))
147
148#define sync_cmpxchg(ptr, old, new) \
149 __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
150
151#define cmpxchg_local(ptr, old, new) \
152 __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
153#endif
154
39#ifdef CONFIG_X86_CMPXCHG64 155#ifdef CONFIG_X86_CMPXCHG64
40#define cmpxchg64(ptr, o, n) \ 156#define cmpxchg64(ptr, o, n) \
41 ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ 157 ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
@@ -71,6 +187,59 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
71 return prev; 187 return prev;
72} 188}
73 189
190#ifndef CONFIG_X86_CMPXCHG
191/*
 192 * Building a kernel capable of running on 80386. It may be necessary to
193 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
194 * a function for each of the sizes we support.
195 */
196
197extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
198extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
199extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
200
201static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
202 unsigned long new, int size)
203{
204 switch (size) {
205 case 1:
206 return cmpxchg_386_u8(ptr, old, new);
207 case 2:
208 return cmpxchg_386_u16(ptr, old, new);
209 case 4:
210 return cmpxchg_386_u32(ptr, old, new);
211 }
212 return old;
213}
214
215#define cmpxchg(ptr, o, n) \
216({ \
217 __typeof__(*(ptr)) __ret; \
218 if (likely(boot_cpu_data.x86 > 3)) \
219 __ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
220 (unsigned long)(o), (unsigned long)(n), \
221 sizeof(*(ptr))); \
222 else \
223 __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
224 (unsigned long)(o), (unsigned long)(n), \
225 sizeof(*(ptr))); \
226 __ret; \
227})
228#define cmpxchg_local(ptr, o, n) \
229({ \
230 __typeof__(*(ptr)) __ret; \
231 if (likely(boot_cpu_data.x86 > 3)) \
232 __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
233 (unsigned long)(o), (unsigned long)(n), \
234 sizeof(*(ptr))); \
235 else \
236 __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
237 (unsigned long)(o), (unsigned long)(n), \
238 sizeof(*(ptr))); \
239 __ret; \
240})
241#endif
242
74#ifndef CONFIG_X86_CMPXCHG64 243#ifndef CONFIG_X86_CMPXCHG64
75/* 244/*
76 * Building a kernel capable running on 80386 and 80486. It may be necessary 245 * Building a kernel capable running on 80386 and 80486. It may be necessary
@@ -111,6 +280,52 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
111 280
112#endif 281#endif
113 282
283#define cmpxchg8b(ptr, o1, o2, n1, n2) \
284({ \
285 char __ret; \
286 __typeof__(o2) __dummy; \
287 __typeof__(*(ptr)) __old1 = (o1); \
288 __typeof__(o2) __old2 = (o2); \
289 __typeof__(*(ptr)) __new1 = (n1); \
290 __typeof__(o2) __new2 = (n2); \
291 asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
292 : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
293 : "a" (__old1), "d"(__old2), \
294 "b" (__new1), "c" (__new2) \
295 : "memory"); \
296 __ret; })
297
298
299#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
300({ \
301 char __ret; \
302 __typeof__(o2) __dummy; \
303 __typeof__(*(ptr)) __old1 = (o1); \
304 __typeof__(o2) __old2 = (o2); \
305 __typeof__(*(ptr)) __new1 = (n1); \
306 __typeof__(o2) __new2 = (n2); \
307 asm volatile("cmpxchg8b %2; setz %1" \
308 : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
 309 : "a" (__old1), "d"(__old2), \
 310 "b" (__new1), "c" (__new2) \
 311 : "memory"); \
312 __ret; })
313
314
315#define cmpxchg_double(ptr, o1, o2, n1, n2) \
316({ \
317 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
318 VM_BUG_ON((unsigned long)(ptr) % 8); \
319 cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
320})
321
322#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
323({ \
324 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
325 VM_BUG_ON((unsigned long)(ptr) % 8); \
 326 cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2)); \
327})
328
114#define system_has_cmpxchg_double() cpu_has_cx8 329#define system_has_cmpxchg_double() cpu_has_cx8
115 330
116#endif /* _ASM_X86_CMPXCHG_32_H */ 331#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 614be87f1a9..7cf5c0a2443 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -1,13 +1,144 @@
1#ifndef _ASM_X86_CMPXCHG_64_H 1#ifndef _ASM_X86_CMPXCHG_64_H
2#define _ASM_X86_CMPXCHG_64_H 2#define _ASM_X86_CMPXCHG_64_H
3 3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5
4static inline void set_64bit(volatile u64 *ptr, u64 val) 6static inline void set_64bit(volatile u64 *ptr, u64 val)
5{ 7{
6 *ptr = val; 8 *ptr = val;
7} 9}
8 10
11extern void __xchg_wrong_size(void);
12extern void __cmpxchg_wrong_size(void);
13
14/*
15 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16 * Since this is generally used to protect other memory information, we
17 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
18 * information around.
19 */
20#define __xchg(x, ptr, size) \
21({ \
22 __typeof(*(ptr)) __x = (x); \
23 switch (size) { \
24 case 1: \
25 { \
26 volatile u8 *__ptr = (volatile u8 *)(ptr); \
27 asm volatile("xchgb %0,%1" \
28 : "=q" (__x), "+m" (*__ptr) \
29 : "0" (__x) \
30 : "memory"); \
31 break; \
32 } \
33 case 2: \
34 { \
35 volatile u16 *__ptr = (volatile u16 *)(ptr); \
36 asm volatile("xchgw %0,%1" \
37 : "=r" (__x), "+m" (*__ptr) \
38 : "0" (__x) \
39 : "memory"); \
40 break; \
41 } \
42 case 4: \
43 { \
44 volatile u32 *__ptr = (volatile u32 *)(ptr); \
45 asm volatile("xchgl %0,%1" \
46 : "=r" (__x), "+m" (*__ptr) \
47 : "0" (__x) \
48 : "memory"); \
49 break; \
50 } \
51 case 8: \
52 { \
53 volatile u64 *__ptr = (volatile u64 *)(ptr); \
54 asm volatile("xchgq %0,%1" \
55 : "=r" (__x), "+m" (*__ptr) \
56 : "0" (__x) \
57 : "memory"); \
58 break; \
59 } \
60 default: \
61 __xchg_wrong_size(); \
62 } \
63 __x; \
64})
65
66#define xchg(ptr, v) \
67 __xchg((v), (ptr), sizeof(*ptr))
68
9#define __HAVE_ARCH_CMPXCHG 1 69#define __HAVE_ARCH_CMPXCHG 1
10 70
71/*
72 * Atomic compare and exchange. Compare OLD with MEM, if identical,
73 * store NEW in MEM. Return the initial value in MEM. Success is
74 * indicated by comparing RETURN with OLD.
75 */
76#define __raw_cmpxchg(ptr, old, new, size, lock) \
77({ \
78 __typeof__(*(ptr)) __ret; \
79 __typeof__(*(ptr)) __old = (old); \
80 __typeof__(*(ptr)) __new = (new); \
81 switch (size) { \
82 case 1: \
83 { \
84 volatile u8 *__ptr = (volatile u8 *)(ptr); \
85 asm volatile(lock "cmpxchgb %2,%1" \
86 : "=a" (__ret), "+m" (*__ptr) \
87 : "q" (__new), "0" (__old) \
88 : "memory"); \
89 break; \
90 } \
91 case 2: \
92 { \
93 volatile u16 *__ptr = (volatile u16 *)(ptr); \
94 asm volatile(lock "cmpxchgw %2,%1" \
95 : "=a" (__ret), "+m" (*__ptr) \
96 : "r" (__new), "0" (__old) \
97 : "memory"); \
98 break; \
99 } \
100 case 4: \
101 { \
102 volatile u32 *__ptr = (volatile u32 *)(ptr); \
103 asm volatile(lock "cmpxchgl %2,%1" \
104 : "=a" (__ret), "+m" (*__ptr) \
105 : "r" (__new), "0" (__old) \
106 : "memory"); \
107 break; \
108 } \
109 case 8: \
110 { \
111 volatile u64 *__ptr = (volatile u64 *)(ptr); \
112 asm volatile(lock "cmpxchgq %2,%1" \
113 : "=a" (__ret), "+m" (*__ptr) \
114 : "r" (__new), "0" (__old) \
115 : "memory"); \
116 break; \
117 } \
118 default: \
119 __cmpxchg_wrong_size(); \
120 } \
121 __ret; \
122})
123
124#define __cmpxchg(ptr, old, new, size) \
125 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
126
127#define __sync_cmpxchg(ptr, old, new, size) \
128 __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
129
130#define __cmpxchg_local(ptr, old, new, size) \
131 __raw_cmpxchg((ptr), (old), (new), (size), "")
132
133#define cmpxchg(ptr, old, new) \
134 __cmpxchg((ptr), (old), (new), sizeof(*ptr))
135
136#define sync_cmpxchg(ptr, old, new) \
137 __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
138
139#define cmpxchg_local(ptr, old, new) \
140 __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
141
11#define cmpxchg64(ptr, o, n) \ 142#define cmpxchg64(ptr, o, n) \
12({ \ 143({ \
13 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 144 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
@@ -20,6 +151,49 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
20 cmpxchg_local((ptr), (o), (n)); \ 151 cmpxchg_local((ptr), (o), (n)); \
21}) 152})
22 153
154#define cmpxchg16b(ptr, o1, o2, n1, n2) \
155({ \
156 char __ret; \
157 __typeof__(o2) __junk; \
158 __typeof__(*(ptr)) __old1 = (o1); \
159 __typeof__(o2) __old2 = (o2); \
160 __typeof__(*(ptr)) __new1 = (n1); \
161 __typeof__(o2) __new2 = (n2); \
162 asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
163 : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
164 : "b"(__new1), "c"(__new2), \
165 "a"(__old1), "d"(__old2)); \
166 __ret; })
167
168
169#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
170({ \
171 char __ret; \
172 __typeof__(o2) __junk; \
173 __typeof__(*(ptr)) __old1 = (o1); \
174 __typeof__(o2) __old2 = (o2); \
175 __typeof__(*(ptr)) __new1 = (n1); \
176 __typeof__(o2) __new2 = (n2); \
177 asm volatile("cmpxchg16b %2;setz %1" \
178 : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
179 : "b"(__new1), "c"(__new2), \
180 "a"(__old1), "d"(__old2)); \
181 __ret; })
182
183#define cmpxchg_double(ptr, o1, o2, n1, n2) \
184({ \
185 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
186 VM_BUG_ON((unsigned long)(ptr) % 16); \
187 cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
188})
189
190#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
191({ \
192 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
193 VM_BUG_ON((unsigned long)(ptr) % 16); \
194 cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
195})
196
23#define system_has_cmpxchg_double() cpu_has_cx16 197#define system_has_cmpxchg_double() cpu_has_cx16
24 198
25#endif /* _ASM_X86_CMPXCHG_64_H */ 199#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 59c6c401f79..1d9cd27c292 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,9 +6,7 @@
6 */ 6 */
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <asm/processor.h>
10#include <asm/user32.h> 9#include <asm/user32.h>
11#include <asm/unistd.h>
12 10
13#define COMPAT_USER_HZ 100 11#define COMPAT_USER_HZ 100
14#define COMPAT_UTS_MACHINE "i686\0\0" 12#define COMPAT_UTS_MACHINE "i686\0\0"
@@ -41,7 +39,6 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
41typedef u32 compat_uint_t; 39typedef u32 compat_uint_t;
42typedef u32 compat_ulong_t; 40typedef u32 compat_ulong_t;
43typedef u64 __attribute__((aligned(4))) compat_u64; 41typedef u64 __attribute__((aligned(4))) compat_u64;
44typedef u32 compat_uptr_t;
45 42
46struct compat_timespec { 43struct compat_timespec {
47 compat_time_t tv_sec; 44 compat_time_t tv_sec;
@@ -111,8 +108,7 @@ struct compat_statfs {
111 compat_fsid_t f_fsid; 108 compat_fsid_t f_fsid;
112 int f_namelen; /* SunOS ignores this field. */ 109 int f_namelen; /* SunOS ignores this field. */
113 int f_frsize; 110 int f_frsize;
114 int f_flags; 111 int f_spare[5];
115 int f_spare[4];
116}; 112};
117 113
118#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff 114#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -125,78 +121,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
125 121
126typedef u32 compat_sigset_word; 122typedef u32 compat_sigset_word;
127 123
128typedef union compat_sigval {
129 compat_int_t sival_int;
130 compat_uptr_t sival_ptr;
131} compat_sigval_t;
132
133typedef struct compat_siginfo {
134 int si_signo;
135 int si_errno;
136 int si_code;
137
138 union {
139 int _pad[128/sizeof(int) - 3];
140
141 /* kill() */
142 struct {
143 unsigned int _pid; /* sender's pid */
144 unsigned int _uid; /* sender's uid */
145 } _kill;
146
147 /* POSIX.1b timers */
148 struct {
149 compat_timer_t _tid; /* timer id */
150 int _overrun; /* overrun count */
151 compat_sigval_t _sigval; /* same as below */
152 int _sys_private; /* not to be passed to user */
153 int _overrun_incr; /* amount to add to overrun */
154 } _timer;
155
156 /* POSIX.1b signals */
157 struct {
158 unsigned int _pid; /* sender's pid */
159 unsigned int _uid; /* sender's uid */
160 compat_sigval_t _sigval;
161 } _rt;
162
163 /* SIGCHLD */
164 struct {
165 unsigned int _pid; /* which child */
166 unsigned int _uid; /* sender's uid */
167 int _status; /* exit code */
168 compat_clock_t _utime;
169 compat_clock_t _stime;
170 } _sigchld;
171
172 /* SIGCHLD (x32 version) */
173 struct {
174 unsigned int _pid; /* which child */
175 unsigned int _uid; /* sender's uid */
176 int _status; /* exit code */
177 compat_s64 _utime;
178 compat_s64 _stime;
179 } _sigchld_x32;
180
181 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
182 struct {
183 unsigned int _addr; /* faulting insn/memory ref. */
184 } _sigfault;
185
186 /* SIGPOLL */
187 struct {
188 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
189 int _fd;
190 } _sigpoll;
191
192 struct {
193 unsigned int _call_addr; /* calling insn */
194 int _syscall; /* triggering system call number */
195 unsigned int _arch; /* AUDIT_ARCH_* of syscall */
196 } _sigsys;
197 } _sifields;
198} compat_siginfo_t;
199
200#define COMPAT_OFF_T_MAX 0x7fffffff 124#define COMPAT_OFF_T_MAX 0x7fffffff
201#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL 125#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
202 126
@@ -261,20 +185,7 @@ struct compat_shmid64_ds {
261/* 185/*
262 * The type of struct elf_prstatus.pr_reg in compatible core dumps. 186 * The type of struct elf_prstatus.pr_reg in compatible core dumps.
263 */ 187 */
264#ifdef CONFIG_X86_X32_ABI
265typedef struct user_regs_struct compat_elf_gregset_t;
266
267#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
268#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
269#define SET_PR_FPVALID(S,V) \
270 do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
271 while (0)
272
273#define COMPAT_USE_64BIT_TIME \
274 (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
275#else
276typedef struct user_regs_struct32 compat_elf_gregset_t; 188typedef struct user_regs_struct32 compat_elf_gregset_t;
277#endif
278 189
279/* 190/*
280 * A pointer passed in from user mode. This should not 191 * A pointer passed in from user mode. This should not
@@ -282,6 +193,7 @@ typedef struct user_regs_struct32 compat_elf_gregset_t;
282 * as pointers because the syscall entry code will have 193 * as pointers because the syscall entry code will have
283 * appropriately converted them already. 194 * appropriately converted them already.
284 */ 195 */
196typedef u32 compat_uptr_t;
285 197
286static inline void __user *compat_ptr(compat_uptr_t uptr) 198static inline void __user *compat_ptr(compat_uptr_t uptr)
287{ 199{
@@ -295,30 +207,13 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
295 207
296static inline void __user *arch_compat_alloc_user_space(long len) 208static inline void __user *arch_compat_alloc_user_space(long len)
297{ 209{
298 compat_uptr_t sp; 210 struct pt_regs *regs = task_pt_regs(current);
299 211 return (void __user *)regs->sp - len;
300 if (test_thread_flag(TIF_IA32)) {
301 sp = task_pt_regs(current)->sp;
302 } else {
303 /* -128 for the x32 ABI redzone */
304 sp = this_cpu_read(old_rsp) - 128;
305 }
306
307 return (void __user *)round_down(sp - len, 16);
308}
309
310static inline bool is_x32_task(void)
311{
312#ifdef CONFIG_X86_X32_ABI
313 if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
314 return true;
315#endif
316 return false;
317} 212}
318 213
319static inline bool is_compat_task(void) 214static inline int is_compat_task(void)
320{ 215{
321 return is_ia32_task() || is_x32_task(); 216 return current_thread_info()->status & TS_COMPAT;
322} 217}
323 218
324#endif /* _ASM_X86_COMPAT_H */ 219#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h
deleted file mode 100644
index 1616562683e..00000000000
--- a/arch/x86/include/asm/context_tracking.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _ASM_X86_CONTEXT_TRACKING_H
2#define _ASM_X86_CONTEXT_TRACKING_H
3
4#ifndef __ASSEMBLY__
5#include <linux/context_tracking.h>
6#include <asm/ptrace.h>
7
8static inline void exception_enter(struct pt_regs *regs)
9{
10 user_exit();
11}
12
13static inline void exception_exit(struct pt_regs *regs)
14{
15#ifdef CONFIG_CONTEXT_TRACKING
16 if (user_mode(regs))
17 user_enter();
18#endif
19}
20
21#else /* __ASSEMBLY__ */
22
23#ifdef CONFIG_CONTEXT_TRACKING
24# define SCHEDULE_USER call schedule_user
25#else
26# define SCHEDULE_USER call schedule
27#endif
28
29#endif /* !__ASSEMBLY__ */
30
31#endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5f9a1243190..4564c8e28a3 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,10 +28,6 @@ struct x86_cpu {
28#ifdef CONFIG_HOTPLUG_CPU 28#ifdef CONFIG_HOTPLUG_CPU
29extern int arch_register_cpu(int num); 29extern int arch_register_cpu(int num);
30extern void arch_unregister_cpu(int); 30extern void arch_unregister_cpu(int);
31extern void __cpuinit start_cpu0(void);
32#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
33extern int _debug_hotplug_cpu(int cpu, int action);
34#endif
35#endif 31#endif
36 32
37DECLARE_PER_CPU(int, cpu_state); 33DECLARE_PER_CPU(int, cpu_state);
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
deleted file mode 100644
index ff501e511d9..00000000000
--- a/arch/x86/include/asm/cpu_device_id.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _CPU_DEVICE_ID
2#define _CPU_DEVICE_ID 1
3
4/*
5 * Declare drivers belonging to specific x86 CPUs
6 * Similar in spirit to pci_device_id and related PCI functions
7 */
8
9#include <linux/mod_devicetable.h>
10
11extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
12
13#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 2d9075e863a..88b23a43f34 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,9 +4,7 @@
4#ifndef _ASM_X86_CPUFEATURE_H 4#ifndef _ASM_X86_CPUFEATURE_H
5#define _ASM_X86_CPUFEATURE_H 5#define _ASM_X86_CPUFEATURE_H
6 6
7#ifndef _ASM_X86_REQUIRED_FEATURES_H
8#include <asm/required-features.h> 7#include <asm/required-features.h>
9#endif
10 8
11#define NCAPINTS 10 /* N 32-bit words worth of info */ 9#define NCAPINTS 10 /* N 32-bit words worth of info */
12 10
@@ -99,7 +97,6 @@
99#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ 97#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
100#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ 98#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
101#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ 99#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
102#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
103 100
104/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 101/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
105#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ 102#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -117,14 +114,12 @@
117#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ 114#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
118#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ 115#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
119#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ 116#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
120#define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */
121#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ 117#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
122#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ 118#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
123#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ 119#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
124#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ 120#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
125#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ 121#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */
126#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ 122#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
127#define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */
128#define X86_FEATURE_AES (4*32+25) /* AES instructions */ 123#define X86_FEATURE_AES (4*32+25) /* AES instructions */
129#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 124#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
130#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ 125#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
@@ -162,7 +157,6 @@
162#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ 157#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
163#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ 158#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
164#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ 159#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
165#define X86_FEATURE_TCE (6*32+17) /* translation cache extension */
166#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ 160#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
167#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ 161#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
168#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ 162#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
@@ -179,8 +173,7 @@
179#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ 173#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
180#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ 174#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
181#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 175#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
182#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ 176#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
183#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
184 177
185/* Virtualization flags: Linux defined, word 8 */ 178/* Virtualization flags: Linux defined, word 8 */
186#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 179#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -202,18 +195,8 @@
202 195
203/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 196/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
204#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 197#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
205#define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3b */
206#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
207#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
208#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
209#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ 198#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
210#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
211#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 199#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
212#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
213#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
214#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
215#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
216#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
217 200
218#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 201#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
219 202
@@ -274,9 +257,7 @@ extern const char * const x86_power_flags[32];
274#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 257#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
275#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 258#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
276#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) 259#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
277#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
278#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) 260#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
279#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
280#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) 261#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
281#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) 262#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
282#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) 263#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
@@ -304,15 +285,17 @@ extern const char * const x86_power_flags[32];
304#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) 285#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
305#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 286#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
306#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 287#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
307#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
308#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
309#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 288#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
310#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 289#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
311#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 290#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
312#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 291#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
313#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 292#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
314#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 293
315#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) 294#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
295# define cpu_has_invlpg 1
296#else
297# define cpu_has_invlpg (boot_cpu_data.x86 > 3)
298#endif
316 299
317#ifdef CONFIG_X86_64 300#ifdef CONFIG_X86_64
318 301
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/arch/x86/include/asm/crypto/ablk_helper.h
deleted file mode 100644
index 4f93df50c23..00000000000
--- a/arch/x86/include/asm/crypto/ablk_helper.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Shared async block cipher helpers
3 */
4
5#ifndef _CRYPTO_ABLK_HELPER_H
6#define _CRYPTO_ABLK_HELPER_H
7
8#include <linux/crypto.h>
9#include <linux/kernel.h>
10#include <crypto/cryptd.h>
11
12struct async_helper_ctx {
13 struct cryptd_ablkcipher *cryptd_tfm;
14};
15
16extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
17 unsigned int key_len);
18
19extern int __ablk_encrypt(struct ablkcipher_request *req);
20
21extern int ablk_encrypt(struct ablkcipher_request *req);
22
23extern int ablk_decrypt(struct ablkcipher_request *req);
24
25extern void ablk_exit(struct crypto_tfm *tfm);
26
27extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
28
29extern int ablk_init(struct crypto_tfm *tfm);
30
31#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/arch/x86/include/asm/crypto/aes.h b/arch/x86/include/asm/crypto/aes.h
deleted file mode 100644
index 80545a1cbe3..00000000000
--- a/arch/x86/include/asm/crypto/aes.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef ASM_X86_AES_H
2#define ASM_X86_AES_H
3
4#include <linux/crypto.h>
5#include <crypto/aes.h>
6
7void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
8 const u8 *src);
9void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
10 const u8 *src);
11#endif
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
deleted file mode 100644
index 98038add801..00000000000
--- a/arch/x86/include/asm/crypto/camellia.h
+++ /dev/null
@@ -1,82 +0,0 @@
1#ifndef ASM_X86_CAMELLIA_H
2#define ASM_X86_CAMELLIA_H
3
4#include <linux/kernel.h>
5#include <linux/crypto.h>
6
7#define CAMELLIA_MIN_KEY_SIZE 16
8#define CAMELLIA_MAX_KEY_SIZE 32
9#define CAMELLIA_BLOCK_SIZE 16
10#define CAMELLIA_TABLE_BYTE_LEN 272
11#define CAMELLIA_PARALLEL_BLOCKS 2
12
13struct camellia_ctx {
14 u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
15 u32 key_length;
16};
17
18struct camellia_lrw_ctx {
19 struct lrw_table_ctx lrw_table;
20 struct camellia_ctx camellia_ctx;
21};
22
23struct camellia_xts_ctx {
24 struct camellia_ctx tweak_ctx;
25 struct camellia_ctx crypt_ctx;
26};
27
28extern int __camellia_setkey(struct camellia_ctx *cctx,
29 const unsigned char *key,
30 unsigned int key_len, u32 *flags);
31
32extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
33 unsigned int keylen);
34extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
35
36extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
37 unsigned int keylen);
38
39/* regular block cipher functions */
40asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
41 const u8 *src, bool xor);
42asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
43 const u8 *src);
44
45/* 2-way parallel cipher functions */
46asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
47 const u8 *src, bool xor);
48asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
49 const u8 *src);
50
51static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
52 const u8 *src)
53{
54 __camellia_enc_blk(ctx, dst, src, false);
55}
56
57static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
58 const u8 *src)
59{
60 __camellia_enc_blk(ctx, dst, src, true);
61}
62
63static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
64 const u8 *src)
65{
66 __camellia_enc_blk_2way(ctx, dst, src, false);
67}
68
69static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
70 const u8 *src)
71{
72 __camellia_enc_blk_2way(ctx, dst, src, true);
73}
74
75/* glue helpers */
76extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
77extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
78 le128 *iv);
79extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
80 le128 *iv);
81
82#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
deleted file mode 100644
index e2d65b061d2..00000000000
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Shared glue code for 128bit block ciphers
3 */
4
5#ifndef _CRYPTO_GLUE_HELPER_H
6#define _CRYPTO_GLUE_HELPER_H
7
8#include <linux/kernel.h>
9#include <linux/crypto.h>
10#include <asm/i387.h>
11#include <crypto/b128ops.h>
12
13typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
14typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
15typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
16 le128 *iv);
17
18#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
19#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
20#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
21
22struct common_glue_func_entry {
23 unsigned int num_blocks; /* number of blocks that @fn will process */
24 union {
25 common_glue_func_t ecb;
26 common_glue_cbc_func_t cbc;
27 common_glue_ctr_func_t ctr;
28 } fn_u;
29};
30
31struct common_glue_ctx {
32 unsigned int num_funcs;
33 int fpu_blocks_limit; /* -1 means fpu not needed at all */
34
35 /*
36 * First funcs entry must have largest num_blocks and last funcs entry
37 * must have num_blocks == 1!
38 */
39 struct common_glue_func_entry funcs[];
40};
41
42static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
43 struct blkcipher_desc *desc,
44 bool fpu_enabled, unsigned int nbytes)
45{
46 if (likely(fpu_blocks_limit < 0))
47 return false;
48
49 if (fpu_enabled)
50 return true;
51
52 /*
53 * Vector-registers are only used when chunk to be processed is large
54 * enough, so do not enable FPU until it is necessary.
55 */
56 if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
57 return false;
58
59 if (desc) {
60 /* prevent sleeping if FPU is in use */
61 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
62 }
63
64 kernel_fpu_begin();
65 return true;
66}
67
68static inline void glue_fpu_end(bool fpu_enabled)
69{
70 if (fpu_enabled)
71 kernel_fpu_end();
72}
73
74static inline void le128_to_be128(be128 *dst, const le128 *src)
75{
76 dst->a = cpu_to_be64(le64_to_cpu(src->a));
77 dst->b = cpu_to_be64(le64_to_cpu(src->b));
78}
79
80static inline void be128_to_le128(le128 *dst, const be128 *src)
81{
82 dst->a = cpu_to_le64(be64_to_cpu(src->a));
83 dst->b = cpu_to_le64(be64_to_cpu(src->b));
84}
85
86static inline void le128_inc(le128 *i)
87{
88 u64 a = le64_to_cpu(i->a);
89 u64 b = le64_to_cpu(i->b);
90
91 b++;
92 if (!b)
93 a++;
94
95 i->a = cpu_to_le64(a);
96 i->b = cpu_to_le64(b);
97}
98
99extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
100 struct blkcipher_desc *desc,
101 struct scatterlist *dst,
102 struct scatterlist *src, unsigned int nbytes);
103
104extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
105 struct blkcipher_desc *desc,
106 struct scatterlist *dst,
107 struct scatterlist *src,
108 unsigned int nbytes);
109
110extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
111 struct blkcipher_desc *desc,
112 struct scatterlist *dst,
113 struct scatterlist *src,
114 unsigned int nbytes);
115
116extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
117 struct blkcipher_desc *desc,
118 struct scatterlist *dst,
119 struct scatterlist *src, unsigned int nbytes);
120
121#endif /* _CRYPTO_GLUE_HELPER_H */
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
deleted file mode 100644
index 0da1d3e2a55..00000000000
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef ASM_X86_SERPENT_AVX_H
2#define ASM_X86_SERPENT_AVX_H
3
4#include <linux/crypto.h>
5#include <crypto/serpent.h>
6
7#define SERPENT_PARALLEL_BLOCKS 8
8
9asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
10 const u8 *src);
11asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
12 const u8 *src);
13
14asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
15 const u8 *src);
16asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
17 const u8 *src, le128 *iv);
18
19#endif
diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h
deleted file mode 100644
index e6e77dffbda..00000000000
--- a/arch/x86/include/asm/crypto/serpent-sse2.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef ASM_X86_SERPENT_SSE2_H
2#define ASM_X86_SERPENT_SSE2_H
3
4#include <linux/crypto.h>
5#include <crypto/serpent.h>
6
7#ifdef CONFIG_X86_32
8
9#define SERPENT_PARALLEL_BLOCKS 4
10
11asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
12 const u8 *src, bool xor);
13asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
14 const u8 *src);
15
16static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
17 const u8 *src)
18{
19 __serpent_enc_blk_4way(ctx, dst, src, false);
20}
21
22static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
23 const u8 *src)
24{
25 __serpent_enc_blk_4way(ctx, dst, src, true);
26}
27
28static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
29 const u8 *src)
30{
31 serpent_dec_blk_4way(ctx, dst, src);
32}
33
34#else
35
36#define SERPENT_PARALLEL_BLOCKS 8
37
38asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
39 const u8 *src, bool xor);
40asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
41 const u8 *src);
42
43static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
44 const u8 *src)
45{
46 __serpent_enc_blk_8way(ctx, dst, src, false);
47}
48
49static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
50 const u8 *src)
51{
52 __serpent_enc_blk_8way(ctx, dst, src, true);
53}
54
55static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
56 const u8 *src)
57{
58 serpent_dec_blk_8way(ctx, dst, src);
59}
60
61#endif
62
63#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
deleted file mode 100644
index 878c51ceebb..00000000000
--- a/arch/x86/include/asm/crypto/twofish.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef ASM_X86_TWOFISH_H
2#define ASM_X86_TWOFISH_H
3
4#include <linux/crypto.h>
5#include <crypto/twofish.h>
6#include <crypto/lrw.h>
7#include <crypto/b128ops.h>
8
9struct twofish_lrw_ctx {
10 struct lrw_table_ctx lrw_table;
11 struct twofish_ctx twofish_ctx;
12};
13
14struct twofish_xts_ctx {
15 struct twofish_ctx tweak_ctx;
16 struct twofish_ctx crypt_ctx;
17};
18
19/* regular block cipher functions from twofish_x86_64 module */
20asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
21 const u8 *src);
22asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
23 const u8 *src);
24
25/* 3-way parallel cipher functions */
26asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
27 const u8 *src, bool xor);
28asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
29 const u8 *src);
30
31/* helpers from twofish_x86_64-3way module */
32extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
33extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
34 le128 *iv);
35extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
36 le128 *iv);
37
38extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
39 unsigned int keylen);
40
41extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
42
43extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
44 unsigned int keylen);
45
46#endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 9476c04ee63..4d447b732d8 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
11 11
12static __always_inline struct task_struct *get_current(void) 12static __always_inline struct task_struct *get_current(void)
13{ 13{
14 return this_cpu_read_stable(current_task); 14 return percpu_read_stable(current_task);
15} 15}
16 16
17#define current get_current() 17#define current get_current()
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4b528a970bd..078ad0caefc 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -2,75 +2,83 @@
2#define _ASM_X86_DEBUGREG_H 2#define _ASM_X86_DEBUGREG_H
3 3
4 4
5#include <linux/bug.h> 5/* Indicate the register numbers for a number of the specific
6#include <uapi/asm/debugreg.h> 6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
7 9
8DECLARE_PER_CPU(unsigned long, cpu_dr7); 10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
9 12
10#ifndef CONFIG_PARAVIRT 13/* Define a few things for the status register. We can use this to determine
11/* 14 which debugging register was responsible for the trap. The other bits
12 * These special macros can be used to get or set a debugging register 15 are either reserved or not of interest to us. */
13 */ 16
14#define get_debugreg(var, register) \ 17/* Define reserved bits in DR6 which are always set to 1 */
15 (var) = native_get_debugreg(register) 18#define DR6_RESERVED (0xFFFF0FF0)
16#define set_debugreg(value, register) \ 19
17 native_set_debugreg(register, value) 20#define DR_TRAP0 (0x1) /* db0 */
21#define DR_TRAP1 (0x2) /* db1 */
22#define DR_TRAP2 (0x4) /* db2 */
23#define DR_TRAP3 (0x8) /* db3 */
24#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
25
26#define DR_STEP (0x4000) /* single-step */
27#define DR_SWITCH (0x8000) /* task switch */
28
29/* Now define a bunch of things for manipulating the control register.
30 The top two bytes of the control register consist of 4 fields of 4
31 bits - each field corresponds to one of the four debug registers,
32 and indicates what types of access we trap on, and how large the data
33 field is that we are looking at */
34
35#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
36#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
37
38#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
39#define DR_RW_WRITE (0x1)
40#define DR_RW_READ (0x3)
41
42#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
43#define DR_LEN_2 (0x4)
44#define DR_LEN_4 (0xC)
45#define DR_LEN_8 (0x8)
46
47/* The low byte to the control register determine which registers are
48 enabled. There are 4 fields of two bits. One bit is "local", meaning
49 that the processor will reset the bit after a task switch and the other
50 is global meaning that we have to explicitly reset the bit. With linux,
51 you can use either one, since we explicitly zero the register when we enter
52 kernel mode. */
53
54#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
55#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
56#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
57#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
58#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
59
60#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
61#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
62
63/* The second byte to the control register has a few special things.
64 We can slow the instruction pipeline for instructions coming via the
65 gdt or the ldt if we want to. I am not sure why this is an advantage */
66
67#ifdef __i386__
68#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
69#else
70#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
18#endif 71#endif
19 72
20static inline unsigned long native_get_debugreg(int regno) 73#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
21{ 74#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
22 unsigned long val = 0; /* Damn you, gcc! */
23
24 switch (regno) {
25 case 0:
26 asm("mov %%db0, %0" :"=r" (val));
27 break;
28 case 1:
29 asm("mov %%db1, %0" :"=r" (val));
30 break;
31 case 2:
32 asm("mov %%db2, %0" :"=r" (val));
33 break;
34 case 3:
35 asm("mov %%db3, %0" :"=r" (val));
36 break;
37 case 6:
38 asm("mov %%db6, %0" :"=r" (val));
39 break;
40 case 7:
41 asm("mov %%db7, %0" :"=r" (val));
42 break;
43 default:
44 BUG();
45 }
46 return val;
47}
48 75
49static inline void native_set_debugreg(int regno, unsigned long value) 76/*
50{ 77 * HW breakpoint additions
51 switch (regno) { 78 */
52 case 0: 79#ifdef __KERNEL__
53 asm("mov %0, %%db0" ::"r" (value)); 80
54 break; 81DECLARE_PER_CPU(unsigned long, cpu_dr7);
55 case 1:
56 asm("mov %0, %%db1" ::"r" (value));
57 break;
58 case 2:
59 asm("mov %0, %%db2" ::"r" (value));
60 break;
61 case 3:
62 asm("mov %0, %%db3" ::"r" (value));
63 break;
64 case 6:
65 asm("mov %0, %%db6" ::"r" (value));
66 break;
67 case 7:
68 asm("mov %0, %%db7" ::"r" (value));
69 break;
70 default:
71 BUG();
72 }
73}
74 82
75static inline void hw_breakpoint_disable(void) 83static inline void hw_breakpoint_disable(void)
76{ 84{
@@ -93,26 +101,6 @@ extern void aout_dump_debugregs(struct user *dump);
93 101
94extern void hw_breakpoint_restore(void); 102extern void hw_breakpoint_restore(void);
95 103
96#ifdef CONFIG_X86_64 104#endif /* __KERNEL__ */
97DECLARE_PER_CPU(int, debug_stack_usage);
98static inline void debug_stack_usage_inc(void)
99{
100 __get_cpu_var(debug_stack_usage)++;
101}
102static inline void debug_stack_usage_dec(void)
103{
104 __get_cpu_var(debug_stack_usage)--;
105}
106int is_debug_stack(unsigned long addr);
107void debug_stack_set_zero(void);
108void debug_stack_reset(void);
109#else /* !X86_64 */
110static inline int is_debug_stack(unsigned long addr) { return 0; }
111static inline void debug_stack_set_zero(void) { }
112static inline void debug_stack_reset(void) { }
113static inline void debug_stack_usage_inc(void) { }
114static inline void debug_stack_usage_dec(void) { }
115#endif /* X86_64 */
116
117 105
118#endif /* _ASM_X86_DEBUGREG_H */ 106#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 8bf1c06070d..41935fadfdf 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -6,7 +6,6 @@
6#include <asm/mmu.h> 6#include <asm/mmu.h>
7 7
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/percpu.h>
10 9
11static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) 10static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
12{ 11{
@@ -36,8 +35,6 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
36 35
37extern struct desc_ptr idt_descr; 36extern struct desc_ptr idt_descr;
38extern gate_desc idt_table[]; 37extern gate_desc idt_table[];
39extern struct desc_ptr nmi_idt_descr;
40extern gate_desc nmi_idt_table[];
41 38
42struct gdt_page { 39struct gdt_page {
43 struct desc_struct gdt[GDT_ENTRIES]; 40 struct desc_struct gdt[GDT_ENTRIES];
@@ -310,16 +307,6 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
310 desc->limit = (limit >> 16) & 0xf; 307 desc->limit = (limit >> 16) & 0xf;
311} 308}
312 309
313#ifdef CONFIG_X86_64
314static inline void set_nmi_gate(int gate, void *addr)
315{
316 gate_desc s;
317
318 pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
319 write_idt_entry(nmi_idt_table, gate, &s);
320}
321#endif
322
323static inline void _set_gate(int gate, unsigned type, void *addr, 310static inline void _set_gate(int gate, unsigned type, void *addr,
324 unsigned dpl, unsigned ist, unsigned seg) 311 unsigned dpl, unsigned ist, unsigned seg)
325{ 312{
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 03dd72957d2..63a2a03d7d5 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -2,8 +2,11 @@
2#define _ASM_X86_DEVICE_H 2#define _ASM_X86_DEVICE_H
3 3
4struct dev_archdata { 4struct dev_archdata {
5#ifdef CONFIG_X86_DEV_DMA_OPS 5#ifdef CONFIG_ACPI
6 struct dma_map_ops *dma_ops; 6 void *acpi_handle;
7#endif
8#ifdef CONFIG_X86_64
9struct dma_map_ops *dma_ops;
7#endif 10#endif
8#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) 11#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
9 void *iommu; /* hook for IOMMU specific extension */ 12 void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index ced283ac79d..9a2d644c08e 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -4,7 +4,6 @@
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/log2.h>
8 7
9/* 8/*
10 * do_div() is NOT a C function. It wants to return 9 * do_div() is NOT a C function. It wants to return
@@ -22,20 +21,15 @@
22({ \ 21({ \
23 unsigned long __upper, __low, __high, __mod, __base; \ 22 unsigned long __upper, __low, __high, __mod, __base; \
24 __base = (base); \ 23 __base = (base); \
25 if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \ 24 asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
26 __mod = n & (__base - 1); \ 25 __upper = __high; \
27 n >>= ilog2(__base); \ 26 if (__high) { \
28 } else { \ 27 __upper = __high % (__base); \
29 asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ 28 __high = __high / (__base); \
30 __upper = __high; \
31 if (__high) { \
32 __upper = __high % (__base); \
33 __high = __high / (__base); \
34 } \
35 asm("divl %2" : "=a" (__low), "=d" (__mod) \
36 : "rm" (__base), "0" (__low), "1" (__upper)); \
37 asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
38 } \ 29 } \
30 asm("divl %2":"=a" (__low), "=d" (__mod) \
31 : "rm" (__base), "0" (__low), "1" (__upper)); \
32 asm("":"=A" (n) : "a" (__low), "d" (__high)); \
39 __mod; \ 33 __mod; \
40}) 34})
41 35
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
deleted file mode 100644
index c0924165997..00000000000
--- a/arch/x86/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef ASMX86_DMA_CONTIGUOUS_H
2#define ASMX86_DMA_CONTIGUOUS_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm-generic/dma-contiguous.h>
8
9static inline void
10dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
11
12#endif
13#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 808dae63eee..d4c419f883a 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,7 +2,7 @@
2#define _ASM_X86_DMA_MAPPING_H 2#define _ASM_X86_DMA_MAPPING_H
3 3
4/* 4/*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and 5 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
6 * Documentation/DMA-API.txt for documentation. 6 * Documentation/DMA-API.txt for documentation.
7 */ 7 */
8 8
@@ -13,7 +13,6 @@
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/swiotlb.h> 14#include <asm/swiotlb.h>
15#include <asm-generic/dma-coherent.h> 15#include <asm-generic/dma-coherent.h>
16#include <linux/dma-contiguous.h>
17 16
18#ifdef CONFIG_ISA 17#ifdef CONFIG_ISA
19# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 18# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -31,7 +30,7 @@ extern struct dma_map_ops *dma_ops;
31 30
32static inline struct dma_map_ops *get_dma_ops(struct device *dev) 31static inline struct dma_map_ops *get_dma_ops(struct device *dev)
33{ 32{
34#ifndef CONFIG_X86_DEV_DMA_OPS 33#ifdef CONFIG_X86_32
35 return dma_ops; 34 return dma_ops;
36#else 35#else
37 if (unlikely(!dev) || !dev->archdata.dma_ops) 36 if (unlikely(!dev) || !dev->archdata.dma_ops)
@@ -47,7 +46,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
47static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 46static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
48{ 47{
49 struct dma_map_ops *ops = get_dma_ops(dev); 48 struct dma_map_ops *ops = get_dma_ops(dev);
50 debug_dma_mapping_error(dev, dma_addr);
51 if (ops->mapping_error) 49 if (ops->mapping_error)
52 return ops->mapping_error(dev, dma_addr); 50 return ops->mapping_error(dev, dma_addr);
53 51
@@ -61,18 +59,7 @@ extern int dma_supported(struct device *hwdev, u64 mask);
61extern int dma_set_mask(struct device *dev, u64 mask); 59extern int dma_set_mask(struct device *dev, u64 mask);
62 60
63extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 61extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
64 dma_addr_t *dma_addr, gfp_t flag, 62 dma_addr_t *dma_addr, gfp_t flag);
65 struct dma_attrs *attrs);
66
67extern void dma_generic_free_coherent(struct device *dev, size_t size,
68 void *vaddr, dma_addr_t dma_addr,
69 struct dma_attrs *attrs);
70
71#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
72extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
73extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
74extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
75#else
76 63
77static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 64static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
78{ 65{
@@ -91,7 +78,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
91{ 78{
92 return daddr; 79 return daddr;
93} 80}
94#endif /* CONFIG_X86_DMA_REMAP */
95 81
96static inline void 82static inline void
97dma_cache_sync(struct device *dev, void *vaddr, size_t size, 83dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -125,11 +111,9 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
125 return gfp; 111 return gfp;
126} 112}
127 113
128#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
129
130static inline void * 114static inline void *
131dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, 115dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
132 gfp_t gfp, struct dma_attrs *attrs) 116 gfp_t gfp)
133{ 117{
134 struct dma_map_ops *ops = get_dma_ops(dev); 118 struct dma_map_ops *ops = get_dma_ops(dev);
135 void *memory; 119 void *memory;
@@ -145,21 +129,18 @@ dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
145 if (!is_device_dma_capable(dev)) 129 if (!is_device_dma_capable(dev))
146 return NULL; 130 return NULL;
147 131
148 if (!ops->alloc) 132 if (!ops->alloc_coherent)
149 return NULL; 133 return NULL;
150 134
151 memory = ops->alloc(dev, size, dma_handle, 135 memory = ops->alloc_coherent(dev, size, dma_handle,
152 dma_alloc_coherent_gfp_flags(dev, gfp), attrs); 136 dma_alloc_coherent_gfp_flags(dev, gfp));
153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 137 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
154 138
155 return memory; 139 return memory;
156} 140}
157 141
158#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 142static inline void dma_free_coherent(struct device *dev, size_t size,
159 143 void *vaddr, dma_addr_t bus)
160static inline void dma_free_attrs(struct device *dev, size_t size,
161 void *vaddr, dma_addr_t bus,
162 struct dma_attrs *attrs)
163{ 144{
164 struct dma_map_ops *ops = get_dma_ops(dev); 145 struct dma_map_ops *ops = get_dma_ops(dev);
165 146
@@ -169,8 +150,8 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
169 return; 150 return;
170 151
171 debug_dma_free_coherent(dev, size, vaddr, bus); 152 debug_dma_free_coherent(dev, size, vaddr, bus);
172 if (ops->free) 153 if (ops->free_coherent)
173 ops->free(dev, size, vaddr, bus, attrs); 154 ops->free_coherent(dev, size, vaddr, bus);
174} 155}
175 156
176#endif 157#endif
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index f6f15986df6..32609919931 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -27,7 +27,6 @@
27#define CFI_REMEMBER_STATE .cfi_remember_state 27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state 28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined 29#define CFI_UNDEFINED .cfi_undefined
30#define CFI_ESCAPE .cfi_escape
31 30
32#ifdef CONFIG_AS_CFI_SIGNAL_FRAME 31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
33#define CFI_SIGNAL_FRAME .cfi_signal_frame 32#define CFI_SIGNAL_FRAME .cfi_signal_frame
@@ -69,7 +68,6 @@
69#define CFI_REMEMBER_STATE cfi_ignore 68#define CFI_REMEMBER_STATE cfi_ignore
70#define CFI_RESTORE_STATE cfi_ignore 69#define CFI_RESTORE_STATE cfi_ignore
71#define CFI_UNDEFINED cfi_ignore 70#define CFI_UNDEFINED cfi_ignore
72#define CFI_ESCAPE cfi_ignore
73#define CFI_SIGNAL_FRAME cfi_ignore 71#define CFI_SIGNAL_FRAME cfi_ignore
74 72
75#endif 73#endif
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cccd07fa5e3..908b96957d8 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,14 +1,81 @@
1#ifndef _ASM_X86_E820_H 1#ifndef _ASM_X86_E820_H
2#define _ASM_X86_E820_H 2#define _ASM_X86_E820_H
3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */
3 5
6/*
7 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
8 * constrained space in the zeropage. If we have more nodes than
9 * that, and if we've booted off EFI firmware, then the EFI tables
10 * passed us from the EFI firmware can list more nodes. Size our
11 * internal memory map tables to have room for these additional
12 * nodes, based on up to three entries per node for which the
13 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
14 * plus E820MAX, allowing space for the possible duplicate E820
15 * entries that might need room in the same arrays, prior to the
16 * call to sanitize_e820_map() to remove duplicates. The allowance
17 * of three memory map entries per node is "enough" entries for
18 * the initial hardware platform motivating this mechanism to make
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
21 * this size.
22 */
23
24/*
25 * Odd: 'make headers_check' complains about numa.h if I try
26 * to collapse the next two #ifdef lines to a single line:
27 * #if defined(__KERNEL__) && defined(CONFIG_EFI)
28 */
29#ifdef __KERNEL__
4#ifdef CONFIG_EFI 30#ifdef CONFIG_EFI
5#include <linux/numa.h> 31#include <linux/numa.h>
6#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES) 32#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
7#else /* ! CONFIG_EFI */ 33#else /* ! CONFIG_EFI */
8#define E820_X_MAX E820MAX 34#define E820_X_MAX E820MAX
9#endif 35#endif
10#include <uapi/asm/e820.h> 36#else /* ! __KERNEL__ */
37#define E820_X_MAX E820MAX
38#endif
39
40#define E820NR 0x1e8 /* # entries in E820MAP */
41
42#define E820_RAM 1
43#define E820_RESERVED 2
44#define E820_ACPI 3
45#define E820_NVS 4
46#define E820_UNUSABLE 5
47
48/*
49 * reserved RAM used by kernel itself
50 * if CONFIG_INTEL_TXT is enabled, memory of this type will be
51 * included in the S3 integrity calculation and so should not include
52 * any memory that BIOS might alter over the S3 transition
53 */
54#define E820_RESERVED_KERN 128
55
11#ifndef __ASSEMBLY__ 56#ifndef __ASSEMBLY__
57#include <linux/types.h>
58struct e820entry {
59 __u64 addr; /* start of memory segment */
60 __u64 size; /* size of memory segment */
61 __u32 type; /* type of memory segment */
62} __attribute__((packed));
63
64struct e820map {
65 __u32 nr_map;
66 struct e820entry map[E820_X_MAX];
67};
68
69#define ISA_START_ADDRESS 0xa0000
70#define ISA_END_ADDRESS 0x100000
71
72#define BIOS_BEGIN 0x000a0000
73#define BIOS_END 0x00100000
74
75#define BIOS_ROM_BASE 0xffe00000
76#define BIOS_ROM_END 0xffffffff
77
78#ifdef __KERNEL__
12/* see comment in arch/x86/kernel/e820.c */ 79/* see comment in arch/x86/kernel/e820.c */
13extern struct e820map e820; 80extern struct e820map e820;
14extern struct e820map e820_saved; 81extern struct e820map e820_saved;
@@ -50,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
50 117
51extern unsigned long e820_end_of_ram_pfn(void); 118extern unsigned long e820_end_of_ram_pfn(void);
52extern unsigned long e820_end_of_low_ram_pfn(void); 119extern unsigned long e820_end_of_low_ram_pfn(void);
53extern u64 early_reserve_e820(u64 sizet, u64 align); 120extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
54 121
55void memblock_x86_fill(void); 122void memblock_x86_fill(void);
56void memblock_find_dma_reserve(void); 123void memblock_find_dma_reserve(void);
@@ -70,8 +137,13 @@ static inline bool is_ISA_range(u64 s, u64 e)
70 return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS; 137 return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
71} 138}
72 139
140#endif /* __KERNEL__ */
73#endif /* __ASSEMBLY__ */ 141#endif /* __ASSEMBLY__ */
142
143#ifdef __KERNEL__
74#include <linux/ioport.h> 144#include <linux/ioport.h>
75 145
76#define HIGH_MEMORY (1024*1024) 146#define HIGH_MEMORY (1024*1024)
147#endif /* __KERNEL__ */
148
77#endif /* _ASM_X86_E820_H */ 149#endif /* _ASM_X86_E820_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 6e8fdf5ad11..7093e4a6a0b 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,8 +3,6 @@
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
6#define EFI_LOADER_SIGNATURE "EL32"
7
8extern unsigned long asmlinkage efi_call_phys(void *, ...); 6extern unsigned long asmlinkage efi_call_phys(void *, ...);
9 7
10#define efi_call_phys0(f) efi_call_phys(f) 8#define efi_call_phys0(f) efi_call_phys(f)
@@ -35,12 +33,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
35#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ 33#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
36 efi_call_virt(f, a1, a2, a3, a4, a5, a6) 34 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
37 35
38#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size) 36#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
39 37
40#else /* !CONFIG_X86_32 */ 38#else /* !CONFIG_X86_32 */
41 39
42#define EFI_LOADER_SIGNATURE "EL64"
43
44extern u64 efi_call0(void *fp); 40extern u64 efi_call0(void *fp);
45extern u64 efi_call1(void *fp, u64 arg1); 41extern u64 efi_call1(void *fp, u64 arg1);
46extern u64 efi_call2(void *fp, u64 arg1, u64 arg2); 42extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
@@ -89,17 +85,15 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
89 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 85 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
90 86
91extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, 87extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
92 u32 type, u64 attribute); 88 u32 type);
93 89
94#endif /* CONFIG_X86_32 */ 90#endif /* CONFIG_X86_32 */
95 91
96extern int add_efi_memmap; 92extern int add_efi_memmap;
97extern void efi_set_executable(efi_memory_desc_t *md, bool executable); 93extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
98extern int efi_memblock_x86_reserve_range(void); 94extern void efi_memblock_x86_reserve_range(void);
99extern void efi_call_phys_prelog(void); 95extern void efi_call_phys_prelog(void);
100extern void efi_call_phys_epilog(void); 96extern void efi_call_phys_epilog(void);
101extern void efi_unmap_memmap(void);
102extern void efi_memory_uc(u64 addr, unsigned long size);
103 97
104#ifndef CONFIG_EFI 98#ifndef CONFIG_EFI
105/* 99/*
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9c999c1674f..f2ad2163109 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -4,7 +4,6 @@
4/* 4/*
5 * ELF register definitions.. 5 * ELF register definitions..
6 */ 6 */
7#include <linux/thread_info.h>
8 7
9#include <asm/ptrace.h> 8#include <asm/ptrace.h>
10#include <asm/user.h> 9#include <asm/user.h>
@@ -84,6 +83,7 @@ extern unsigned int vdso_enabled;
84 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
85 84
86#include <asm/processor.h> 85#include <asm/processor.h>
86#include <asm/system.h>
87 87
88#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
89#include <asm/desc.h> 89#include <asm/desc.h>
@@ -155,12 +155,7 @@ do { \
155#define elf_check_arch(x) \ 155#define elf_check_arch(x) \
156 ((x)->e_machine == EM_X86_64) 156 ((x)->e_machine == EM_X86_64)
157 157
158#define compat_elf_check_arch(x) \ 158#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
159 (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
160
161#if __USER32_DS != __USER_DS
162# error "The following code assumes __USER32_DS == __USER_DS"
163#endif
164 159
165static inline void elf_common_init(struct thread_struct *t, 160static inline void elf_common_init(struct thread_struct *t,
166 struct pt_regs *regs, const u16 ds) 161 struct pt_regs *regs, const u16 ds)
@@ -183,9 +178,8 @@ static inline void elf_common_init(struct thread_struct *t,
183void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); 178void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
184#define compat_start_thread start_thread_ia32 179#define compat_start_thread start_thread_ia32
185 180
186void set_personality_ia32(bool); 181void set_personality_ia32(void);
187#define COMPAT_SET_PERSONALITY(ex) \ 182#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
188 set_personality_ia32((ex).e_machine == EM_X86_64)
189 183
190#define COMPAT_ELF_PLATFORM ("i686") 184#define COMPAT_ELF_PLATFORM ("i686")
191 185
@@ -292,7 +286,7 @@ do { \
292#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */ 286#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
293 287
294/* 1GB for 64bit, 8MB for 32bit */ 288/* 1GB for 64bit, 8MB for 32bit */
295#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff) 289#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
296 290
297#define ARCH_DLINFO \ 291#define ARCH_DLINFO \
298do { \ 292do { \
@@ -301,20 +295,9 @@ do { \
301 (unsigned long)current->mm->context.vdso); \ 295 (unsigned long)current->mm->context.vdso); \
302} while (0) 296} while (0)
303 297
304#define ARCH_DLINFO_X32 \
305do { \
306 if (vdso_enabled) \
307 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
308 (unsigned long)current->mm->context.vdso); \
309} while (0)
310
311#define AT_SYSINFO 32 298#define AT_SYSINFO 32
312 299
313#define COMPAT_ARCH_DLINFO \ 300#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
314if (test_thread_flag(TIF_X32)) \
315 ARCH_DLINFO_X32; \
316else \
317 ARCH_DLINFO_IA32(sysctl_vsyscall32)
318 301
319#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) 302#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
320 303
@@ -330,8 +313,6 @@ struct linux_binprm;
330#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 313#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
331extern int arch_setup_additional_pages(struct linux_binprm *bprm, 314extern int arch_setup_additional_pages(struct linux_binprm *bprm,
332 int uses_interp); 315 int uses_interp);
333extern int x32_setup_additional_pages(struct linux_binprm *bprm,
334 int uses_interp);
335 316
336extern int syscall32_setup_pages(struct linux_binprm *, int exstack); 317extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
337#define compat_arch_setup_additional_pages syscall32_setup_pages 318#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -339,32 +320,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
339extern unsigned long arch_randomize_brk(struct mm_struct *mm); 320extern unsigned long arch_randomize_brk(struct mm_struct *mm);
340#define arch_randomize_brk arch_randomize_brk 321#define arch_randomize_brk arch_randomize_brk
341 322
342/*
343 * True on X86_32 or when emulating IA32 on X86_64
344 */
345static inline int mmap_is_ia32(void)
346{
347#ifdef CONFIG_X86_32
348 return 1;
349#endif
350#ifdef CONFIG_IA32_EMULATION
351 if (test_thread_flag(TIF_ADDR32))
352 return 1;
353#endif
354 return 0;
355}
356
357/* Do not change the values. See get_align_mask() */
358enum align_flags {
359 ALIGN_VA_32 = BIT(0),
360 ALIGN_VA_64 = BIT(1),
361};
362
363struct va_alignment {
364 int flags;
365 unsigned long mask;
366} ____cacheline_aligned;
367
368extern struct va_alignment va_align;
369extern unsigned long align_vdso_addr(unsigned long);
370#endif /* _ASM_X86_ELF_H */ 323#endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index 75ce3f47d20..cc70c1c78ca 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -4,7 +4,9 @@
4enum reboot_type { 4enum reboot_type {
5 BOOT_TRIPLE = 't', 5 BOOT_TRIPLE = 't',
6 BOOT_KBD = 'k', 6 BOOT_KBD = 'k',
7#ifdef CONFIG_X86_32
7 BOOT_BIOS = 'b', 8 BOOT_BIOS = 'b',
9#endif
8 BOOT_ACPI = 'a', 10 BOOT_ACPI = 'a',
9 BOOT_EFI = 'e', 11 BOOT_EFI = 'e',
10 BOOT_CF9 = 'p', 12 BOOT_CF9 = 'p',
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 40afa0005c6..0baa628e330 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -15,6 +15,15 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
15BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) 15BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
16BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) 16BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
17BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) 17BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
18
19.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
20 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
21.if NUM_INVALIDATE_TLB_VECTORS > \idx
22BUILD_INTERRUPT3(invalidate_interrupt\idx,
23 (INVALIDATE_TLB_VECTOR_START)+\idx,
24 smp_invalidate_interrupt)
25.endif
26.endr
18#endif 27#endif
19 28
20BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR) 29BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
deleted file mode 100644
index 54c2e1db274..00000000000
--- a/arch/x86/include/asm/exec.h
+++ /dev/null
@@ -1 +0,0 @@
1/* define arch_align_stack() here */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index a09c2857106..460c74e4852 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,7 +19,6 @@
19#include <asm/acpi.h> 19#include <asm/acpi.h>
20#include <asm/apicdef.h> 20#include <asm/apicdef.h>
21#include <asm/page.h> 21#include <asm/page.h>
22#include <asm/pvclock.h>
23#ifdef CONFIG_X86_32 22#ifdef CONFIG_X86_32
24#include <linux/threads.h> 23#include <linux/threads.h>
25#include <asm/kmap_types.h> 24#include <asm/kmap_types.h>
@@ -82,10 +81,6 @@ enum fixed_addresses {
82 VVAR_PAGE, 81 VVAR_PAGE,
83 VSYSCALL_HPET, 82 VSYSCALL_HPET,
84#endif 83#endif
85#ifdef CONFIG_PARAVIRT_CLOCK
86 PVCLOCK_FIXMAP_BEGIN,
87 PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
88#endif
89 FIX_DBGP_BASE, 84 FIX_DBGP_BASE,
90 FIX_EARLYCON_MEM_BASE, 85 FIX_EARLYCON_MEM_BASE,
91#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 86#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
@@ -122,7 +117,7 @@ enum fixed_addresses {
122#endif 117#endif
123 FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */ 118 FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
124 FIX_TEXT_POKE0, /* first page is last, because allocation is backward */ 119 FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
125#ifdef CONFIG_X86_INTEL_MID 120#ifdef CONFIG_X86_MRST
126 FIX_LNW_VRTC, 121 FIX_LNW_VRTC,
127#endif 122#endif
128 __end_of_permanent_fixed_addresses, 123 __end_of_permanent_fixed_addresses,
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index d3d74698dce..dbe82a5c5ea 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
99 virtual_dma_residue += virtual_dma_count; 99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count = 0; 100 virtual_dma_count = 0;
101#ifdef TRACE_FLPY_INT 101#ifdef TRACE_FLPY_INT
102 printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes, 103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait); 104 dma_wait);
105 calls = 0; 105 calls = 0;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
deleted file mode 100644
index 41ab26ea656..00000000000
--- a/arch/x86/include/asm/fpu-internal.h
+++ /dev/null
@@ -1,619 +0,0 @@
1/*
2 * Copyright (C) 1994 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * General FPU state handling cleanups
6 * Gareth Hughes <gareth@valinux.com>, May 2000
7 * x86-64 work by Andi Kleen 2002
8 */
9
10#ifndef _FPU_INTERNAL_H
11#define _FPU_INTERNAL_H
12
13#include <linux/kernel_stat.h>
14#include <linux/regset.h>
15#include <linux/compat.h>
16#include <linux/slab.h>
17#include <asm/asm.h>
18#include <asm/cpufeature.h>
19#include <asm/processor.h>
20#include <asm/sigcontext.h>
21#include <asm/user.h>
22#include <asm/uaccess.h>
23#include <asm/xsave.h>
24#include <asm/smap.h>
25
26#ifdef CONFIG_X86_64
27# include <asm/sigcontext32.h>
28# include <asm/user32.h>
29int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
30 compat_sigset_t *set, struct pt_regs *regs);
31int ia32_setup_frame(int sig, struct k_sigaction *ka,
32 compat_sigset_t *set, struct pt_regs *regs);
33#else
34# define user_i387_ia32_struct user_i387_struct
35# define user32_fxsr_struct user_fxsr_struct
36# define ia32_setup_frame __setup_frame
37# define ia32_setup_rt_frame __setup_rt_frame
38#endif
39
40extern unsigned int mxcsr_feature_mask;
41extern void fpu_init(void);
42extern void eager_fpu_init(void);
43
44DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
45
46extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
47 struct task_struct *tsk);
48extern void convert_to_fxsr(struct task_struct *tsk,
49 const struct user_i387_ia32_struct *env);
50
51extern user_regset_active_fn fpregs_active, xfpregs_active;
52extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
53 xstateregs_get;
54extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
55 xstateregs_set;
56
57/*
58 * xstateregs_active == fpregs_active. Please refer to the comment
59 * at the definition of fpregs_active.
60 */
61#define xstateregs_active fpregs_active
62
63#ifdef CONFIG_MATH_EMULATION
64# define HAVE_HWFP (boot_cpu_data.hard_math)
65extern void finit_soft_fpu(struct i387_soft_struct *soft);
66#else
67# define HAVE_HWFP 1
68static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
69#endif
70
71static inline int is_ia32_compat_frame(void)
72{
73 return config_enabled(CONFIG_IA32_EMULATION) &&
74 test_thread_flag(TIF_IA32);
75}
76
77static inline int is_ia32_frame(void)
78{
79 return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
80}
81
82static inline int is_x32_frame(void)
83{
84 return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
85}
86
87#define X87_FSW_ES (1 << 7) /* Exception Summary */
88
89static __always_inline __pure bool use_eager_fpu(void)
90{
91 return static_cpu_has(X86_FEATURE_EAGER_FPU);
92}
93
94static __always_inline __pure bool use_xsaveopt(void)
95{
96 return static_cpu_has(X86_FEATURE_XSAVEOPT);
97}
98
99static __always_inline __pure bool use_xsave(void)
100{
101 return static_cpu_has(X86_FEATURE_XSAVE);
102}
103
104static __always_inline __pure bool use_fxsr(void)
105{
106 return static_cpu_has(X86_FEATURE_FXSR);
107}
108
109static inline void fx_finit(struct i387_fxsave_struct *fx)
110{
111 memset(fx, 0, xstate_size);
112 fx->cwd = 0x37f;
113 fx->mxcsr = MXCSR_DEFAULT;
114}
115
116extern void __sanitize_i387_state(struct task_struct *);
117
118static inline void sanitize_i387_state(struct task_struct *tsk)
119{
120 if (!use_xsaveopt())
121 return;
122 __sanitize_i387_state(tsk);
123}
124
125#define user_insn(insn, output, input...) \
126({ \
127 int err; \
128 asm volatile(ASM_STAC "\n" \
129 "1:" #insn "\n\t" \
130 "2: " ASM_CLAC "\n" \
131 ".section .fixup,\"ax\"\n" \
132 "3: movl $-1,%[err]\n" \
133 " jmp 2b\n" \
134 ".previous\n" \
135 _ASM_EXTABLE(1b, 3b) \
136 : [err] "=r" (err), output \
137 : "0"(0), input); \
138 err; \
139})
140
141#define check_insn(insn, output, input...) \
142({ \
143 int err; \
144 asm volatile("1:" #insn "\n\t" \
145 "2:\n" \
146 ".section .fixup,\"ax\"\n" \
147 "3: movl $-1,%[err]\n" \
148 " jmp 2b\n" \
149 ".previous\n" \
150 _ASM_EXTABLE(1b, 3b) \
151 : [err] "=r" (err), output \
152 : "0"(0), input); \
153 err; \
154})
155
156static inline int fsave_user(struct i387_fsave_struct __user *fx)
157{
158 return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
159}
160
161static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
162{
163 if (config_enabled(CONFIG_X86_32))
164 return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
165 else if (config_enabled(CONFIG_AS_FXSAVEQ))
166 return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
167
168 /* See comment in fpu_fxsave() below. */
169 return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
170}
171
172static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
173{
174 if (config_enabled(CONFIG_X86_32))
175 return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
176 else if (config_enabled(CONFIG_AS_FXSAVEQ))
177 return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
178
179 /* See comment in fpu_fxsave() below. */
180 return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
181 "m" (*fx));
182}
183
184static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
185{
186 if (config_enabled(CONFIG_X86_32))
187 return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
188 else if (config_enabled(CONFIG_AS_FXSAVEQ))
189 return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
190
191 /* See comment in fpu_fxsave() below. */
192 return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
193 "m" (*fx));
194}
195
196static inline int frstor_checking(struct i387_fsave_struct *fx)
197{
198 return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
199}
200
201static inline int frstor_user(struct i387_fsave_struct __user *fx)
202{
203 return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
204}
205
206static inline void fpu_fxsave(struct fpu *fpu)
207{
208 if (config_enabled(CONFIG_X86_32))
209 asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
210 else if (config_enabled(CONFIG_AS_FXSAVEQ))
211 asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
212 else {
213 /* Using "rex64; fxsave %0" is broken because, if the memory
214 * operand uses any extended registers for addressing, a second
215 * REX prefix will be generated (to the assembler, rex64
216 * followed by semicolon is a separate instruction), and hence
217 * the 64-bitness is lost.
218 *
219 * Using "fxsaveq %0" would be the ideal choice, but is only
220 * supported starting with gas 2.16.
221 *
222 * Using, as a workaround, the properly prefixed form below
223 * isn't accepted by any binutils version so far released,
224 * complaining that the same type of prefix is used twice if
225 * an extended register is needed for addressing (fix submitted
226 * to mainline 2005-11-21).
227 *
228 * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
229 *
230 * This, however, we can work around by forcing the compiler to
231 * select an addressing mode that doesn't require extended
232 * registers.
233 */
234 asm volatile( "rex64/fxsave (%[fx])"
235 : "=m" (fpu->state->fxsave)
236 : [fx] "R" (&fpu->state->fxsave));
237 }
238}
239
240/*
241 * These must be called with preempt disabled. Returns
242 * 'true' if the FPU state is still intact.
243 */
244static inline int fpu_save_init(struct fpu *fpu)
245{
246 if (use_xsave()) {
247 fpu_xsave(fpu);
248
249 /*
250 * xsave header may indicate the init state of the FP.
251 */
252 if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
253 return 1;
254 } else if (use_fxsr()) {
255 fpu_fxsave(fpu);
256 } else {
257 asm volatile("fnsave %[fx]; fwait"
258 : [fx] "=m" (fpu->state->fsave));
259 return 0;
260 }
261
262 /*
263 * If exceptions are pending, we need to clear them so
264 * that we don't randomly get exceptions later.
265 *
266 * FIXME! Is this perhaps only true for the old-style
267 * irq13 case? Maybe we could leave the x87 state
268 * intact otherwise?
269 */
270 if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
271 asm volatile("fnclex");
272 return 0;
273 }
274 return 1;
275}
276
277static inline int __save_init_fpu(struct task_struct *tsk)
278{
279 return fpu_save_init(&tsk->thread.fpu);
280}
281
282static inline int fpu_restore_checking(struct fpu *fpu)
283{
284 if (use_xsave())
285 return fpu_xrstor_checking(&fpu->state->xsave);
286 else if (use_fxsr())
287 return fxrstor_checking(&fpu->state->fxsave);
288 else
289 return frstor_checking(&fpu->state->fsave);
290}
291
292static inline int restore_fpu_checking(struct task_struct *tsk)
293{
294 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
295 is pending. Clear the x87 state here by setting it to fixed
296 values. "m" is a random variable that should be in L1 */
297 alternative_input(
298 ASM_NOP8 ASM_NOP2,
299 "emms\n\t" /* clear stack tags */
300 "fildl %P[addr]", /* set F?P to defined value */
301 X86_FEATURE_FXSAVE_LEAK,
302 [addr] "m" (tsk->thread.fpu.has_fpu));
303
304 return fpu_restore_checking(&tsk->thread.fpu);
305}
306
307/*
308 * Software FPU state helpers. Careful: these need to
309 * be preemption protection *and* they need to be
310 * properly paired with the CR0.TS changes!
311 */
312static inline int __thread_has_fpu(struct task_struct *tsk)
313{
314 return tsk->thread.fpu.has_fpu;
315}
316
317/* Must be paired with an 'stts' after! */
318static inline void __thread_clear_has_fpu(struct task_struct *tsk)
319{
320 tsk->thread.fpu.has_fpu = 0;
321 this_cpu_write(fpu_owner_task, NULL);
322}
323
/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	/* The task takes ownership of the registers... */
	tsk->thread.fpu.has_fpu = 1;
	/* ...and this CPU records whose state it holds. */
	this_cpu_write(fpu_owner_task, tsk);
}
330
/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
/*
 * Release FPU ownership for @tsk. In lazy mode also set CR0.TS
 * (stts) so the next FPU use traps and can be handled lazily;
 * eager mode never sets TS.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}
344
/*
 * Take FPU ownership for @tsk. In lazy mode CR0.TS must be cleared
 * (clts) before the registers can be touched without faulting.
 */
static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}
351
/*
 * If @tsk owns the FPU, flush pending x87 exceptions and release it.
 * The exception table entry makes a faulting fwait simply resume at
 * label 2, i.e. delayed user-space exceptions are discarded.
 */
static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}
362
/*
 * Drop @tsk's FPU state entirely: the task is treated as if it had
 * never used the FPU (used_math cleared, preload counter reset).
 * Preemption-safe wrapper around __drop_fpu().
 */
static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	preempt_disable();
	tsk->fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}
374
375static inline void drop_init_fpu(struct task_struct *tsk)
376{
377 if (!use_eager_fpu())
378 drop_fpu(tsk);
379 else {
380 if (use_xsave())
381 xrstor_state(init_xstate_buf, -1);
382 else
383 fxrstor_checking(&init_xstate_buf->i387);
384 }
385}
386
387/*
388 * FPU state switching for scheduling.
389 *
390 * This is a two-stage process:
391 *
392 * - switch_fpu_prepare() saves the old state and
393 * sets the new state of the CR0.TS bit. This is
394 * done within the context of the old process.
395 *
396 * - switch_fpu_finish() restores the new state as
397 * necessary.
398 */
/* Decision carried from switch_fpu_prepare() to switch_fpu_finish(). */
typedef struct { int preload; } fpu_switch_t;
400
/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	/* Any lazily-cached register state on @cpu is now considered stale. */
	per_cpu(fpu_owner_task, cpu) = NULL;
}
412
413static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
414{
415 return new == this_cpu_read_stable(fpu_owner_task) &&
416 cpu == new->thread.fpu.last_cpu;
417}
418
/*
 * First half of the FPU context switch, run in the context of the
 * outgoing task with preemption disabled: saves @old's state if it
 * owns the registers, decides whether @new's state should be
 * preloaded, and adjusts CR0.TS. The preload decision is returned
 * for switch_fpu_finish() to act on.
 */
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the math, pre-load the FPU on xsave processors
	 * or if the past 5 consecutive context-switches used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		/*
		 * If the save invalidated the register contents, poison
		 * last_cpu so fpu_lazy_restore() can never match @old here.
		 */
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			/*
			 * In lazy mode this CPU may still hold @new's
			 * registers from last time; skip the restore if so.
			 */
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}
456
457/*
458 * By the time this gets called, we've already cleared CR0.TS and
459 * given the process the FPU if we are going to preload the FPU
460 * state - all we need to do is to conditionally restore the register
461 * state itself.
462 */
463static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
464{
465 if (fpu.preload) {
466 if (unlikely(restore_fpu_checking(new)))
467 drop_init_fpu(new);
468 }
469}
470
471/*
472 * Signal frame handlers...
473 */
474extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
475extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
476
477static inline int xstate_sigframe_size(void)
478{
479 return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
480}
481
482static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
483{
484 void __user *buf_fx = buf;
485 int size = xstate_sigframe_size();
486
487 if (ia32_frame && use_fxsr()) {
488 buf_fx = buf + sizeof(struct i387_fsave_struct);
489 size += sizeof(struct i387_fsave_struct);
490 }
491
492 return __restore_xstate_sig(buf, buf_fx, size);
493}
494
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	/* Take FPU ownership (clearing CR0.TS in lazy mode) if needed. */
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
508
509static inline void __save_fpu(struct task_struct *tsk)
510{
511 if (use_xsave())
512 xsave_state(&tsk->thread.fpu.state->xsave, -1);
513 else
514 fpu_fxsave(&tsk->thread.fpu);
515}
516
/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	/* Saving only makes sense if @tsk actually owns the registers. */
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		/* Eager mode: save, but keep ownership and leave CR0.TS alone. */
		__save_fpu(tsk);
		return;
	}

	/* Lazy mode: save, then give up the FPU and re-set CR0.TS. */
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}
534
535/*
536 * i387 state interaction
537 */
538static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
539{
540 if (cpu_has_fxsr) {
541 return tsk->thread.fpu.state->fxsave.cwd;
542 } else {
543 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
544 }
545}
546
547static inline unsigned short get_fpu_swd(struct task_struct *tsk)
548{
549 if (cpu_has_fxsr) {
550 return tsk->thread.fpu.state->fxsave.swd;
551 } else {
552 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
553 }
554}
555
556static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
557{
558 if (cpu_has_xmm) {
559 return tsk->thread.fpu.state->fxsave.mxcsr;
560 } else {
561 return MXCSR_DEFAULT;
562 }
563}
564
565static bool fpu_allocated(struct fpu *fpu)
566{
567 return fpu->state != NULL;
568}
569
570static inline int fpu_alloc(struct fpu *fpu)
571{
572 if (fpu_allocated(fpu))
573 return 0;
574 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
575 if (!fpu->state)
576 return -ENOMEM;
577 WARN_ON((unsigned long)fpu->state & 15);
578 return 0;
579}
580
581static inline void fpu_free(struct fpu *fpu)
582{
583 if (fpu->state) {
584 kmem_cache_free(task_xstate_cachep, fpu->state);
585 fpu->state = NULL;
586 }
587}
588
/*
 * Copy FPU state from @src into @dst.
 *
 * Eager mode zeroes @dst's buffer and then saves the CPU's live
 * register contents into it (presumably the caller runs as @src, so
 * the registers hold @src's state -- TODO confirm against the fork
 * path). Lazy mode first flushes @src's state to memory with
 * unlazy_fpu() and then copies buffer-to-buffer.
 */
static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}
602
603static inline unsigned long
604alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
605 unsigned long *size)
606{
607 unsigned long frame_size = xstate_sigframe_size();
608
609 *buf_fx = sp = round_down(sp - frame_size, 64);
610 if (ia32_frame && use_fxsr()) {
611 frame_size += sizeof(struct i387_fsave_struct);
612 sp -= sizeof(struct i387_fsave_struct);
613 }
614
615 *size = frame_size;
616 return sp;
617}
618
619#endif
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9a25b522d37..268c783ab1c 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,54 +3,37 @@
3 3
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5 5
6 /* skip is set if the stack was already partially adjusted */ 6 .macro MCOUNT_SAVE_FRAME
7 .macro MCOUNT_SAVE_FRAME skip=0 7 /* taken from glibc */
8 /* 8 subq $0x38, %rsp
9 * We add enough stack to save all regs. 9 movq %rax, (%rsp)
10 */ 10 movq %rcx, 8(%rsp)
11 subq $(SS+8-\skip), %rsp 11 movq %rdx, 16(%rsp)
12 movq %rax, RAX(%rsp) 12 movq %rsi, 24(%rsp)
13 movq %rcx, RCX(%rsp) 13 movq %rdi, 32(%rsp)
14 movq %rdx, RDX(%rsp) 14 movq %r8, 40(%rsp)
15 movq %rsi, RSI(%rsp) 15 movq %r9, 48(%rsp)
16 movq %rdi, RDI(%rsp)
17 movq %r8, R8(%rsp)
18 movq %r9, R9(%rsp)
19 /* Move RIP to its proper location */
20 movq SS+8(%rsp), %rdx
21 movq %rdx, RIP(%rsp)
22 .endm 16 .endm
23 17
24 .macro MCOUNT_RESTORE_FRAME skip=0 18 .macro MCOUNT_RESTORE_FRAME
25 movq R9(%rsp), %r9 19 movq 48(%rsp), %r9
26 movq R8(%rsp), %r8 20 movq 40(%rsp), %r8
27 movq RDI(%rsp), %rdi 21 movq 32(%rsp), %rdi
28 movq RSI(%rsp), %rsi 22 movq 24(%rsp), %rsi
29 movq RDX(%rsp), %rdx 23 movq 16(%rsp), %rdx
30 movq RCX(%rsp), %rcx 24 movq 8(%rsp), %rcx
31 movq RAX(%rsp), %rax 25 movq (%rsp), %rax
32 addq $(SS+8-\skip), %rsp 26 addq $0x38, %rsp
33 .endm 27 .endm
34 28
35#endif 29#endif
36 30
37#ifdef CONFIG_FUNCTION_TRACER 31#ifdef CONFIG_FUNCTION_TRACER
38#ifdef CC_USING_FENTRY 32#define MCOUNT_ADDR ((long)(mcount))
39# define MCOUNT_ADDR ((long)(__fentry__))
40#else
41# define MCOUNT_ADDR ((long)(mcount))
42#endif
43#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 33#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
44 34
45#ifdef CONFIG_DYNAMIC_FTRACE
46#define ARCH_SUPPORTS_FTRACE_OPS 1
47#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
48#endif
49
50#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
51extern void mcount(void); 36extern void mcount(void);
52extern atomic_t modifying_ftrace_code;
53extern void __fentry__(void);
54 37
55static inline unsigned long ftrace_call_adjust(unsigned long addr) 38static inline unsigned long ftrace_call_adjust(unsigned long addr)
56{ 39{
@@ -67,8 +50,6 @@ struct dyn_arch_ftrace {
67 /* No extra data needed for x86 */ 50 /* No extra data needed for x86 */
68}; 51};
69 52
70int ftrace_int3_handler(struct pt_regs *regs);
71
72#endif /* CONFIG_DYNAMIC_FTRACE */ 53#endif /* CONFIG_DYNAMIC_FTRACE */
73#endif /* __ASSEMBLY__ */ 54#endif /* __ASSEMBLY__ */
74#endif /* CONFIG_FUNCTION_TRACER */ 55#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index be27ba1e947..d09bb03653f 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,13 +9,11 @@
9#include <asm/asm.h> 9#include <asm/asm.h>
10#include <asm/errno.h> 10#include <asm/errno.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/smap.h> 12#include <asm/system.h>
13 13
14#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ 14#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
15 asm volatile("\t" ASM_STAC "\n" \ 15 asm volatile("1:\t" insn "\n" \
16 "1:\t" insn "\n" \ 16 "2:\t.section .fixup,\"ax\"\n" \
17 "2:\t" ASM_CLAC "\n" \
18 "\t.section .fixup,\"ax\"\n" \
19 "3:\tmov\t%3, %1\n" \ 17 "3:\tmov\t%3, %1\n" \
20 "\tjmp\t2b\n" \ 18 "\tjmp\t2b\n" \
21 "\t.previous\n" \ 19 "\t.previous\n" \
@@ -24,14 +22,12 @@
24 : "i" (-EFAULT), "0" (oparg), "1" (0)) 22 : "i" (-EFAULT), "0" (oparg), "1" (0))
25 23
26#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ 24#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
27 asm volatile("\t" ASM_STAC "\n" \ 25 asm volatile("1:\tmovl %2, %0\n" \
28 "1:\tmovl %2, %0\n" \
29 "\tmovl\t%0, %3\n" \ 26 "\tmovl\t%0, %3\n" \
30 "\t" insn "\n" \ 27 "\t" insn "\n" \
31 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ 28 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
32 "\tjnz\t1b\n" \ 29 "\tjnz\t1b\n" \
33 "3:\t" ASM_CLAC "\n" \ 30 "3:\t.section .fixup,\"ax\"\n" \
34 "\t.section .fixup,\"ax\"\n" \
35 "4:\tmov\t%5, %1\n" \ 31 "4:\tmov\t%5, %1\n" \
36 "\tjmp\t3b\n" \ 32 "\tjmp\t3b\n" \
37 "\t.previous\n" \ 33 "\t.previous\n" \
@@ -55,6 +51,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
55 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 51 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
56 return -EFAULT; 52 return -EFAULT;
57 53
54#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
55 /* Real i386 machines can only support FUTEX_OP_SET */
56 if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
57 return -ENOSYS;
58#endif
59
58 pagefault_disable(); 60 pagefault_disable();
59 61
60 switch (op) { 62 switch (op) {
@@ -112,13 +114,17 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
112{ 114{
113 int ret = 0; 115 int ret = 0;
114 116
117#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
118 /* Real i386 machines have no cmpxchg instruction */
119 if (boot_cpu_data.x86 == 3)
120 return -ENOSYS;
121#endif
122
115 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 123 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
116 return -EFAULT; 124 return -EFAULT;
117 125
118 asm volatile("\t" ASM_STAC "\n" 126 asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
119 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" 127 "2:\t.section .fixup, \"ax\"\n"
120 "2:\t" ASM_CLAC "\n"
121 "\t.section .fixup, \"ax\"\n"
122 "3:\tmov %3, %0\n" 128 "3:\tmov %3, %0\n"
123 "\tjmp 2b\n" 129 "\tjmp 2b\n"
124 "\t.previous\n" 130 "\t.previous\n"
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index b3799d88ffc..91d915a6525 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -1,4 +1,53 @@
1#ifndef __LINUX_GPIO_H 1/*
2#warning Include linux/gpio.h instead of asm/gpio.h 2 * Generic GPIO API implementation for x86.
3#include <linux/gpio.h> 3 *
4#endif 4 * Derived from the generic GPIO API for powerpc:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef _ASM_X86_GPIO_H
17#define _ASM_X86_GPIO_H
18
19#include <asm-generic/gpio.h>
20
21#ifdef CONFIG_GPIOLIB
22
23/*
24 * Just call gpiolib.
25 */
26static inline int gpio_get_value(unsigned int gpio)
27{
28 return __gpio_get_value(gpio);
29}
30
31static inline void gpio_set_value(unsigned int gpio, int value)
32{
33 __gpio_set_value(gpio, value);
34}
35
36static inline int gpio_cansleep(unsigned int gpio)
37{
38 return __gpio_cansleep(gpio);
39}
40
41static inline int gpio_to_irq(unsigned int gpio)
42{
43 return __gpio_to_irq(gpio);
44}
45
46static inline int irq_to_gpio(unsigned int irq)
47{
48 return -EINVAL;
49}
50
51#endif /* CONFIG_GPIOLIB */
52
53#endif /* _ASM_X86_GPIO_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 81f04cee5f7..55e4de613f0 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -7,10 +7,10 @@
7typedef struct { 7typedef struct {
8 unsigned int __softirq_pending; 8 unsigned int __softirq_pending;
9 unsigned int __nmi_count; /* arch dependent */ 9 unsigned int __nmi_count; /* arch dependent */
10 unsigned int irq0_irqs;
10#ifdef CONFIG_X86_LOCAL_APIC 11#ifdef CONFIG_X86_LOCAL_APIC
11 unsigned int apic_timer_irqs; /* arch dependent */ 12 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq_spurious_count; 13 unsigned int irq_spurious_count;
13 unsigned int icr_read_retry_count;
14#endif 14#endif
15 unsigned int x86_platform_ipis; /* arch dependent */ 15 unsigned int x86_platform_ipis; /* arch dependent */
16 unsigned int apic_perf_irqs; 16 unsigned int apic_perf_irqs;
@@ -18,10 +18,6 @@ typedef struct {
18#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
19 unsigned int irq_resched_count; 19 unsigned int irq_resched_count;
20 unsigned int irq_call_count; 20 unsigned int irq_call_count;
21 /*
22 * irq_tlb_count is double-counted in irq_call_count, so it must be
23 * subtracted from irq_call_count when displaying irq_call_count
24 */
25 unsigned int irq_tlb_count; 21 unsigned int irq_tlb_count;
26#endif 22#endif
27#ifdef CONFIG_X86_THERMAL_VECTOR 23#ifdef CONFIG_X86_THERMAL_VECTOR
@@ -39,15 +35,14 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
39 35
40#define __ARCH_IRQ_STAT 36#define __ARCH_IRQ_STAT
41 37
42#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) 38#define inc_irq_stat(member) percpu_inc(irq_stat.member)
43 39
44#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) 40#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
45 41
46#define __ARCH_SET_SOFTIRQ_PENDING 42#define __ARCH_SET_SOFTIRQ_PENDING
47 43
48#define set_softirq_pending(x) \ 44#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
49 this_cpu_write(irq_stat.__softirq_pending, (x)) 45#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
50#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
51 46
52extern void ack_bad_irq(unsigned int irq); 47extern void ack_bad_irq(unsigned int irq);
53 48
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 302a323b3f6..3bd04022fd0 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
61void kunmap(struct page *page); 61void kunmap(struct page *page);
62 62
63void *kmap_atomic_prot(struct page *page, pgprot_t prot); 63void *kmap_atomic_prot(struct page *page, pgprot_t prot);
64void *kmap_atomic(struct page *page); 64void *__kmap_atomic(struct page *page);
65void __kunmap_atomic(void *kvaddr); 65void __kunmap_atomic(void *kvaddr);
66void *kmap_atomic_pfn(unsigned long pfn); 66void *kmap_atomic_pfn(unsigned long pfn);
67void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); 67void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 434e2106cc8..2c392d663dc 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -35,6 +35,8 @@
35#define HPET_ID_NUMBER_SHIFT 8 35#define HPET_ID_NUMBER_SHIFT 8
36#define HPET_ID_VENDOR_SHIFT 16 36#define HPET_ID_VENDOR_SHIFT 16
37 37
38#define HPET_ID_VENDOR_8086 0x8086
39
38#define HPET_CFG_ENABLE 0x001 40#define HPET_CFG_ENABLE 0x001
39#define HPET_CFG_LEGACY 0x002 41#define HPET_CFG_LEGACY 0x002
40#define HPET_LEGACY_8254 2 42#define HPET_LEGACY_8254 2
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index bdd35dbd060..439a9acc132 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -90,8 +90,4 @@ static inline void arch_release_hugepage(struct page *page)
90{ 90{
91} 91}
92 92
93static inline void arch_clear_hugepage_flags(struct page *page)
94{
95}
96
97#endif /* _ASM_X86_HUGETLB_H */ 93#endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index ef1c4d2d41e..824ca07860d 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -1,8 +1,7 @@
1#ifndef _I386_HW_BREAKPOINT_H 1#ifndef _I386_HW_BREAKPOINT_H
2#define _I386_HW_BREAKPOINT_H 2#define _I386_HW_BREAKPOINT_H
3 3
4#include <uapi/asm/hw_breakpoint.h> 4#ifdef __KERNEL__
5
6#define __ARCH_HW_BREAKPOINT_H 5#define __ARCH_HW_BREAKPOINT_H
7 6
8/* 7/*
@@ -72,4 +71,6 @@ extern int arch_bp_generic_fields(int x86_len, int x86_type,
72 71
73extern struct pmu perf_ops_bp; 72extern struct pmu perf_ops_bp;
74 73
74#endif /* __KERNEL__ */
75#endif /* _I386_HW_BREAKPOINT_H */ 75#endif /* _I386_HW_BREAKPOINT_H */
76
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b518c750993..7a15153c675 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -49,7 +49,6 @@ extern const struct hypervisor_x86 *x86_hyper;
49extern const struct hypervisor_x86 x86_hyper_vmware; 49extern const struct hypervisor_x86 x86_hyper_vmware;
50extern const struct hypervisor_x86 x86_hyper_ms_hyperv; 50extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
51extern const struct hypervisor_x86 x86_hyper_xen_hvm; 51extern const struct hypervisor_x86 x86_hyper_xen_hvm;
52extern const struct hypervisor_x86 x86_hyper_kvm;
53 52
54static inline bool hypervisor_x2apic_available(void) 53static inline bool hypervisor_x2apic_available(void)
55{ 54{
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d6909..c9e09ea0564 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -13,44 +13,324 @@
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14 14
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/kernel_stat.h>
17#include <linux/regset.h>
16#include <linux/hardirq.h> 18#include <linux/hardirq.h>
19#include <linux/slab.h>
20#include <asm/asm.h>
21#include <asm/cpufeature.h>
22#include <asm/processor.h>
23#include <asm/sigcontext.h>
24#include <asm/user.h>
25#include <asm/uaccess.h>
26#include <asm/xsave.h>
17 27
18struct pt_regs; 28extern unsigned int sig_xstate_size;
19struct user_i387_struct; 29extern void fpu_init(void);
20 30extern void mxcsr_feature_mask_init(void);
21extern int init_fpu(struct task_struct *child); 31extern int init_fpu(struct task_struct *child);
22extern void fpu_finit(struct fpu *fpu); 32extern asmlinkage void math_state_restore(void);
33extern void __math_state_restore(void);
23extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); 34extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
24extern void math_state_restore(void);
25 35
26extern bool irq_fpu_usable(void); 36extern user_regset_active_fn fpregs_active, xfpregs_active;
37extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
38 xstateregs_get;
39extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
40 xstateregs_set;
27 41
28/* 42/*
29 * Careful: __kernel_fpu_begin/end() must be called with preempt disabled 43 * xstateregs_active == fpregs_active. Please refer to the comment
30 * and they don't touch the preempt state on their own. 44 * at the definition of fpregs_active.
31 * If you enable preemption after __kernel_fpu_begin(), preempt notifier 45 */
32 * should call the __kernel_fpu_end() to prevent the kernel/user FPU 46#define xstateregs_active fpregs_active
33 * state from getting corrupted. KVM for example uses this model. 47
34 * 48extern struct _fpx_sw_bytes fx_sw_reserved;
35 * All other cases use kernel_fpu_begin/end() which disable preemption 49#ifdef CONFIG_IA32_EMULATION
36 * during kernel FPU usage. 50extern unsigned int sig_xstate_ia32_size;
51extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
52struct _fpstate_ia32;
53struct _xstate_ia32;
54extern int save_i387_xstate_ia32(void __user *buf);
55extern int restore_i387_xstate_ia32(void __user *buf);
56#endif
57
58#ifdef CONFIG_MATH_EMULATION
59extern void finit_soft_fpu(struct i387_soft_struct *soft);
60#else
61static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
62#endif
63
64#define X87_FSW_ES (1 << 7) /* Exception Summary */
65
66static __always_inline __pure bool use_xsaveopt(void)
67{
68 return static_cpu_has(X86_FEATURE_XSAVEOPT);
69}
70
71static __always_inline __pure bool use_xsave(void)
72{
73 return static_cpu_has(X86_FEATURE_XSAVE);
74}
75
76static __always_inline __pure bool use_fxsr(void)
77{
78 return static_cpu_has(X86_FEATURE_FXSR);
79}
80
81extern void __sanitize_i387_state(struct task_struct *);
82
83static inline void sanitize_i387_state(struct task_struct *tsk)
84{
85 if (!use_xsaveopt())
86 return;
87 __sanitize_i387_state(tsk);
88}
89
90#ifdef CONFIG_X86_64
91static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
92{
93 int err;
94
95 /* See comment in fxsave() below. */
96#ifdef CONFIG_AS_FXSAVEQ
97 asm volatile("1: fxrstorq %[fx]\n\t"
98 "2:\n"
99 ".section .fixup,\"ax\"\n"
100 "3: movl $-1,%[err]\n"
101 " jmp 2b\n"
102 ".previous\n"
103 _ASM_EXTABLE(1b, 3b)
104 : [err] "=r" (err)
105 : [fx] "m" (*fx), "0" (0));
106#else
107 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
108 "2:\n"
109 ".section .fixup,\"ax\"\n"
110 "3: movl $-1,%[err]\n"
111 " jmp 2b\n"
112 ".previous\n"
113 _ASM_EXTABLE(1b, 3b)
114 : [err] "=r" (err)
115 : [fx] "R" (fx), "m" (*fx), "0" (0));
116#endif
117 return err;
118}
119
120static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
121{
122 int err;
123
124 /*
125 * Clear the bytes not touched by the fxsave and reserved
126 * for the SW usage.
127 */
128 err = __clear_user(&fx->sw_reserved,
129 sizeof(struct _fpx_sw_bytes));
130 if (unlikely(err))
131 return -EFAULT;
132
133 /* See comment in fxsave() below. */
134#ifdef CONFIG_AS_FXSAVEQ
135 asm volatile("1: fxsaveq %[fx]\n\t"
136 "2:\n"
137 ".section .fixup,\"ax\"\n"
138 "3: movl $-1,%[err]\n"
139 " jmp 2b\n"
140 ".previous\n"
141 _ASM_EXTABLE(1b, 3b)
142 : [err] "=r" (err), [fx] "=m" (*fx)
143 : "0" (0));
144#else
145 asm volatile("1: rex64/fxsave (%[fx])\n\t"
146 "2:\n"
147 ".section .fixup,\"ax\"\n"
148 "3: movl $-1,%[err]\n"
149 " jmp 2b\n"
150 ".previous\n"
151 _ASM_EXTABLE(1b, 3b)
152 : [err] "=r" (err), "=m" (*fx)
153 : [fx] "R" (fx), "0" (0));
154#endif
155 if (unlikely(err) &&
156 __clear_user(fx, sizeof(struct i387_fxsave_struct)))
157 err = -EFAULT;
158 /* No need to clear here because the caller clears USED_MATH */
159 return err;
160}
161
162static inline void fpu_fxsave(struct fpu *fpu)
163{
164 /* Using "rex64; fxsave %0" is broken because, if the memory operand
165 uses any extended registers for addressing, a second REX prefix
166 will be generated (to the assembler, rex64 followed by semicolon
167 is a separate instruction), and hence the 64-bitness is lost. */
168
169#ifdef CONFIG_AS_FXSAVEQ
170 /* Using "fxsaveq %0" would be the ideal choice, but is only supported
171 starting with gas 2.16. */
172 __asm__ __volatile__("fxsaveq %0"
173 : "=m" (fpu->state->fxsave));
174#else
175 /* Using, as a workaround, the properly prefixed form below isn't
176 accepted by any binutils version so far released, complaining that
177 the same type of prefix is used twice if an extended register is
178 needed for addressing (fix submitted to mainline 2005-11-21).
179 asm volatile("rex64/fxsave %0"
180 : "=m" (fpu->state->fxsave));
181 This, however, we can work around by forcing the compiler to select
182 an addressing mode that doesn't require extended registers. */
183 asm volatile("rex64/fxsave (%[fx])"
184 : "=m" (fpu->state->fxsave)
185 : [fx] "R" (&fpu->state->fxsave));
186#endif
187}
188
189#else /* CONFIG_X86_32 */
190
191/* perform fxrstor iff the processor has extended states, otherwise frstor */
192static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
193{
194 /*
195 * The "nop" is needed to make the instructions the same
196 * length.
197 */
198 alternative_input(
199 "nop ; frstor %1",
200 "fxrstor %1",
201 X86_FEATURE_FXSR,
202 "m" (*fx));
203
204 return 0;
205}
206
207static inline void fpu_fxsave(struct fpu *fpu)
208{
209 asm volatile("fxsave %[fx]"
210 : [fx] "=m" (fpu->state->fxsave));
211}
212
213#endif /* CONFIG_X86_64 */
214
215/* We need a safe address that is cheap to find and that is already
216 in L1 during context switch. The best choices are unfortunately
217 different for UP and SMP */
218#ifdef CONFIG_SMP
219#define safe_address (__per_cpu_offset[0])
220#else
221#define safe_address (kstat_cpu(0).cpustat.user)
222#endif
223
224/*
225 * These must be called with preempt disabled
226 */
227static inline void fpu_save_init(struct fpu *fpu)
228{
229 if (use_xsave()) {
230 fpu_xsave(fpu);
231
232 /*
233 * xsave header may indicate the init state of the FP.
234 */
235 if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
236 return;
237 } else if (use_fxsr()) {
238 fpu_fxsave(fpu);
239 } else {
240 asm volatile("fnsave %[fx]; fwait"
241 : [fx] "=m" (fpu->state->fsave));
242 return;
243 }
244
245 if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
246 asm volatile("fnclex");
247
248 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
249 is pending. Clear the x87 state here by setting it to fixed
250 values. safe_address is a random variable that should be in L1 */
251 alternative_input(
252 ASM_NOP8 ASM_NOP2,
253 "emms\n\t" /* clear stack tags */
254 "fildl %P[addr]", /* set F?P to defined value */
255 X86_FEATURE_FXSAVE_LEAK,
256 [addr] "m" (safe_address));
257}
258
259static inline void __save_init_fpu(struct task_struct *tsk)
260{
261 fpu_save_init(&tsk->thread.fpu);
262 task_thread_info(tsk)->status &= ~TS_USEDFPU;
263}
264
265static inline int fpu_fxrstor_checking(struct fpu *fpu)
266{
267 return fxrstor_checking(&fpu->state->fxsave);
268}
269
270static inline int fpu_restore_checking(struct fpu *fpu)
271{
272 if (use_xsave())
273 return fpu_xrstor_checking(fpu);
274 else
275 return fpu_fxrstor_checking(fpu);
276}
277
278static inline int restore_fpu_checking(struct task_struct *tsk)
279{
280 return fpu_restore_checking(&tsk->thread.fpu);
281}
282
283/*
284 * Signal frame handlers...
37 */ 285 */
38extern void __kernel_fpu_begin(void); 286extern int save_i387_xstate(void __user *buf);
39extern void __kernel_fpu_end(void); 287extern int restore_i387_xstate(void __user *buf);
288
289static inline void __unlazy_fpu(struct task_struct *tsk)
290{
291 if (task_thread_info(tsk)->status & TS_USEDFPU) {
292 __save_init_fpu(tsk);
293 stts();
294 } else
295 tsk->fpu_counter = 0;
296}
297
298static inline void __clear_fpu(struct task_struct *tsk)
299{
300 if (task_thread_info(tsk)->status & TS_USEDFPU) {
301 /* Ignore delayed exceptions from user space */
302 asm volatile("1: fwait\n"
303 "2:\n"
304 _ASM_EXTABLE(1b, 2b));
305 task_thread_info(tsk)->status &= ~TS_USEDFPU;
306 stts();
307 }
308}
40 309
41static inline void kernel_fpu_begin(void) 310static inline void kernel_fpu_begin(void)
42{ 311{
43 WARN_ON_ONCE(!irq_fpu_usable()); 312 struct thread_info *me = current_thread_info();
44 preempt_disable(); 313 preempt_disable();
45 __kernel_fpu_begin(); 314 if (me->status & TS_USEDFPU)
315 __save_init_fpu(me->task);
316 else
317 clts();
46} 318}
47 319
48static inline void kernel_fpu_end(void) 320static inline void kernel_fpu_end(void)
49{ 321{
50 __kernel_fpu_end(); 322 stts();
51 preempt_enable(); 323 preempt_enable();
52} 324}
53 325
326static inline bool irq_fpu_usable(void)
327{
328 struct pt_regs *regs;
329
330 return !in_interrupt() || !(regs = get_irq_regs()) || \
331 user_mode(regs) || (read_cr0() & X86_CR0_TS);
332}
333
54/* 334/*
55 * Some instructions like VIA's padlock instructions generate a spurious 335 * Some instructions like VIA's padlock instructions generate a spurious
56 * DNA fault but don't modify SSE registers. And these instructions 336 * DNA fault but don't modify SSE registers. And these instructions
@@ -83,21 +363,90 @@ static inline void irq_ts_restore(int TS_state)
83} 363}
84 364
85/* 365/*
86 * The question "does this thread have fpu access?" 366 * These disable preemption on their own and are safe
87 * is slightly racy, since preemption could come in 367 */
88 * and revoke it immediately after the test. 368static inline void save_init_fpu(struct task_struct *tsk)
89 * 369{
90 * However, even in that very unlikely scenario, 370 preempt_disable();
91 * we can just assume we have FPU access - typically 371 __save_init_fpu(tsk);
92 * to save the FP state - we'll just take a #NM 372 stts();
93 * fault and get the FPU access back. 373 preempt_enable();
374}
375
376static inline void unlazy_fpu(struct task_struct *tsk)
377{
378 preempt_disable();
379 __unlazy_fpu(tsk);
380 preempt_enable();
381}
382
383static inline void clear_fpu(struct task_struct *tsk)
384{
385 preempt_disable();
386 __clear_fpu(tsk);
387 preempt_enable();
388}
389
390/*
391 * i387 state interaction
94 */ 392 */
95static inline int user_has_fpu(void) 393static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
394{
395 if (cpu_has_fxsr) {
396 return tsk->thread.fpu.state->fxsave.cwd;
397 } else {
398 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
399 }
400}
401
402static inline unsigned short get_fpu_swd(struct task_struct *tsk)
403{
404 if (cpu_has_fxsr) {
405 return tsk->thread.fpu.state->fxsave.swd;
406 } else {
407 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
408 }
409}
410
411static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
412{
413 if (cpu_has_xmm) {
414 return tsk->thread.fpu.state->fxsave.mxcsr;
415 } else {
416 return MXCSR_DEFAULT;
417 }
418}
419
420static bool fpu_allocated(struct fpu *fpu)
421{
422 return fpu->state != NULL;
423}
424
425static inline int fpu_alloc(struct fpu *fpu)
426{
427 if (fpu_allocated(fpu))
428 return 0;
429 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
430 if (!fpu->state)
431 return -ENOMEM;
432 WARN_ON((unsigned long)fpu->state & 15);
433 return 0;
434}
435
436static inline void fpu_free(struct fpu *fpu)
437{
438 if (fpu->state) {
439 kmem_cache_free(task_xstate_cachep, fpu->state);
440 fpu->state = NULL;
441 }
442}
443
444static inline void fpu_copy(struct fpu *dst, struct fpu *src)
96{ 445{
97 return current->thread.fpu.has_fpu; 446 memcpy(dst->state, src->state, xstate_size);
98} 447}
99 448
100extern void unlazy_fpu(struct task_struct *tsk); 449extern void fpu_finit(struct fpu *fpu);
101 450
102#endif /* __ASSEMBLY__ */ 451#endif /* __ASSEMBLY__ */
103 452
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 4c6da2e4bb1..1f7e6251728 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -29,23 +29,20 @@ struct old_sigaction32 {
29 unsigned int sa_restorer; /* Another 32 bit pointer */ 29 unsigned int sa_restorer; /* Another 32 bit pointer */
30}; 30};
31 31
32typedef struct sigaltstack_ia32 {
33 unsigned int ss_sp;
34 int ss_flags;
35 unsigned int ss_size;
36} stack_ia32_t;
37
32struct ucontext_ia32 { 38struct ucontext_ia32 {
33 unsigned int uc_flags; 39 unsigned int uc_flags;
34 unsigned int uc_link; 40 unsigned int uc_link;
35 compat_stack_t uc_stack; 41 stack_ia32_t uc_stack;
36 struct sigcontext_ia32 uc_mcontext; 42 struct sigcontext_ia32 uc_mcontext;
37 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 43 compat_sigset_t uc_sigmask; /* mask last for extensibility */
38}; 44};
39 45
40struct ucontext_x32 {
41 unsigned int uc_flags;
42 unsigned int uc_link;
43 compat_stack_t uc_stack;
44 unsigned int uc__pad0; /* needed for alignment */
45 struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
46 compat_sigset_t uc_sigmask; /* mask last for extensibility */
47};
48
49/* This matches struct stat64 in glibc2.2, hence the absolutely 46/* This matches struct stat64 in glibc2.2, hence the absolutely
50 * insane amounts of padding around dev_t's. 47 * insane amounts of padding around dev_t's.
51 */ 48 */
@@ -80,6 +77,58 @@ struct stat64 {
80 unsigned long long st_ino; 77 unsigned long long st_ino;
81} __attribute__((packed)); 78} __attribute__((packed));
82 79
80typedef struct compat_siginfo {
81 int si_signo;
82 int si_errno;
83 int si_code;
84
85 union {
86 int _pad[((128 / sizeof(int)) - 3)];
87
88 /* kill() */
89 struct {
90 unsigned int _pid; /* sender's pid */
91 unsigned int _uid; /* sender's uid */
92 } _kill;
93
94 /* POSIX.1b timers */
95 struct {
96 compat_timer_t _tid; /* timer id */
97 int _overrun; /* overrun count */
98 compat_sigval_t _sigval; /* same as below */
99 int _sys_private; /* not to be passed to user */
100 int _overrun_incr; /* amount to add to overrun */
101 } _timer;
102
103 /* POSIX.1b signals */
104 struct {
105 unsigned int _pid; /* sender's pid */
106 unsigned int _uid; /* sender's uid */
107 compat_sigval_t _sigval;
108 } _rt;
109
110 /* SIGCHLD */
111 struct {
112 unsigned int _pid; /* which child */
113 unsigned int _uid; /* sender's uid */
114 int _status; /* exit code */
115 compat_clock_t _utime;
116 compat_clock_t _stime;
117 } _sigchld;
118
119 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
120 struct {
121 unsigned int _addr; /* faulting insn/memory ref. */
122 } _sigfault;
123
124 /* SIGPOLL */
125 struct {
126 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
127 int _fd;
128 } _sigpoll;
129 } _sifields;
130} compat_siginfo_t;
131
83#define IA32_STACK_TOP IA32_PAGE_OFFSET 132#define IA32_STACK_TOP IA32_PAGE_OFFSET
84 133
85#ifdef __KERNEL__ 134#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/ia32_unistd.h b/arch/x86/include/asm/ia32_unistd.h
index b0d5716ca1e..976f6ecd2ce 100644
--- a/arch/x86/include/asm/ia32_unistd.h
+++ b/arch/x86/include/asm/ia32_unistd.h
@@ -2,10 +2,17 @@
2#define _ASM_X86_IA32_UNISTD_H 2#define _ASM_X86_IA32_UNISTD_H
3 3
4/* 4/*
5 * This file contains the system call numbers of the ia32 compat ABI, 5 * This file contains the system call numbers of the ia32 port,
6 * this is for the kernel only. 6 * this is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-x86/unistd_32.h. -AK
7 */ 9 */
8#define __SYSCALL_ia32_NR(x) (x) 10
9#include <asm/unistd_32_ia32.h> 11#define __NR_ia32_restart_syscall 0
12#define __NR_ia32_exit 1
13#define __NR_ia32_read 3
14#define __NR_ia32_write 4
15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173
10 17
11#endif /* _ASM_X86_IA32_UNISTD_H */ 18#endif /* _ASM_X86_IA32_UNISTD_H */
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index c5d1785373e..f1e4268ef3c 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,20 +1,12 @@
1#ifndef _ASM_X86_IDLE_H 1#ifndef _ASM_X86_IDLE_H
2#define _ASM_X86_IDLE_H 2#define _ASM_X86_IDLE_H
3 3
4#define IDLE_START 1
5#define IDLE_END 2
6
7struct notifier_block;
8void idle_notifier_register(struct notifier_block *n);
9void idle_notifier_unregister(struct notifier_block *n);
10
11#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
12void enter_idle(void); 5void enter_idle(void);
13void exit_idle(void); 6void exit_idle(void);
14#else /* !CONFIG_X86_64 */ 7#else /* !CONFIG_X86_64 */
15static inline void enter_idle(void) { } 8static inline void enter_idle(void) { }
16static inline void exit_idle(void) { } 9static inline void exit_idle(void) { }
17static inline void __exit_idle(void) { }
18#endif /* CONFIG_X86_64 */ 10#endif /* CONFIG_X86_64 */
19 11
20void amd_e400_remove_cpu(int cpu); 12void amd_e400_remove_cpu(int cpu);
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 74a2e312e8a..205b063e3e3 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -97,12 +97,11 @@
97 97
98/* Attribute search APIs */ 98/* Attribute search APIs */
99extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 99extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
100extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
101extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, 100extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
102 int lpfx_id, 101 insn_byte_t last_pfx,
103 insn_attr_t esc_attr); 102 insn_attr_t esc_attr);
104extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, 103extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
105 int lpfx_id, 104 insn_byte_t last_pfx,
106 insn_attr_t esc_attr); 105 insn_attr_t esc_attr);
107extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, 106extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
108 insn_byte_t vex_m, 107 insn_byte_t vex_m,
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index adcc0ae73d0..8dbe353e41e 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,8 +5,6 @@
5extern void __init early_ioremap_page_table_range_init(void); 5extern void __init early_ioremap_page_table_range_init(void);
6#endif 6#endif
7 7
8extern void __init zone_sizes_init(void);
9
10extern unsigned long __init 8extern unsigned long __init
11kernel_physical_mapping_init(unsigned long start, 9kernel_physical_mapping_init(unsigned long start,
12 unsigned long end, 10 unsigned long end,
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 48eb30a8606..88c765e1641 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -96,6 +96,12 @@ struct insn {
96#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ 96#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
97#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ 97#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
98 98
99/* The last prefix is needed for two-byte and three-byte opcodes */
100static inline insn_byte_t insn_last_prefix(struct insn *insn)
101{
102 return insn->prefixes.bytes[3];
103}
104
99extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); 105extern void insn_init(struct insn *insn, const void *kaddr, int x86_64);
100extern void insn_get_prefixes(struct insn *insn); 106extern void insn_get_prefixes(struct insn *insn);
101extern void insn_get_opcode(struct insn *insn); 107extern void insn_get_opcode(struct insn *insn);
@@ -131,13 +137,6 @@ static inline int insn_is_avx(struct insn *insn)
131 return (insn->vex_prefix.value != 0); 137 return (insn->vex_prefix.value != 0);
132} 138}
133 139
134/* Ensure this instruction is decoded completely */
135static inline int insn_complete(struct insn *insn)
136{
137 return insn->opcode.got && insn->modrm.got && insn->sib.got &&
138 insn->displacement.got && insn->immediate.got;
139}
140
141static inline insn_byte_t insn_vex_m_bits(struct insn *insn) 140static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
142{ 141{
143 if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ 142 if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
@@ -154,18 +153,6 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
154 return X86_VEX_P(insn->vex_prefix.bytes[2]); 153 return X86_VEX_P(insn->vex_prefix.bytes[2]);
155} 154}
156 155
157/* Get the last prefix id from last prefix or VEX prefix */
158static inline int insn_last_prefix_id(struct insn *insn)
159{
160 if (insn_is_avx(insn))
161 return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
162
163 if (insn->prefixes.bytes[3])
164 return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
165
166 return 0;
167}
168
169/* Offset of each field from kaddr */ 156/* Offset of each field from kaddr */
170static inline int insn_offset_rex_prefix(struct insn *insn) 157static inline int insn_offset_rex_prefix(struct insn *insn)
171{ 158{
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 925b605eb5c..29f66793cc5 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -1,17 +1,11 @@
1#ifndef _ASM_X86_INTEL_SCU_IPC_H_ 1#ifndef _ASM_X86_INTEL_SCU_IPC_H_
2#define _ASM_X86_INTEL_SCU_IPC_H_ 2#define _ASM_X86_INTEL_SCU_IPC_H_
3 3
4#include <linux/notifier.h> 4#define IPCMSG_VRTC 0xFA /* Set vRTC device */
5 5
6#define IPCMSG_WARM_RESET 0xF0 6/* Command id associated with message IPCMSG_VRTC */
7#define IPCMSG_COLD_RESET 0xF1 7#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
8#define IPCMSG_SOFT_RESET 0xF2 8#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
9#define IPCMSG_COLD_BOOT 0xF3
10
11#define IPCMSG_VRTC 0xFA /* Set vRTC device */
12 /* Command id associated with message IPCMSG_VRTC */
13 #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
14 #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
15 9
16/* Read single register */ 10/* Read single register */
17int intel_scu_ipc_ioread8(u16 addr, u8 *data); 11int intel_scu_ipc_ioread8(u16 addr, u8 *data);
@@ -50,24 +44,4 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
50/* Update FW version */ 44/* Update FW version */
51int intel_scu_ipc_fw_update(u8 *buffer, u32 length); 45int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
52 46
53extern struct blocking_notifier_head intel_scu_notifier;
54
55static inline void intel_scu_notifier_add(struct notifier_block *nb)
56{
57 blocking_notifier_chain_register(&intel_scu_notifier, nb);
58}
59
60static inline void intel_scu_notifier_remove(struct notifier_block *nb)
61{
62 blocking_notifier_chain_unregister(&intel_scu_notifier, nb);
63}
64
65static inline int intel_scu_notifier_post(unsigned long v, void *p)
66{
67 return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
68}
69
70#define SCU_AVAILABLE 1
71#define SCU_DOWN 2
72
73#endif 47#endif
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 73d8c5398ea..690d1cc9a87 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -5,7 +5,7 @@
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
6#include <asm/apicdef.h> 6#include <asm/apicdef.h>
7#include <asm/irq_vectors.h> 7#include <asm/irq_vectors.h>
8#include <asm/x86_init.h> 8
9/* 9/*
10 * Intel IO-APIC support for SMP and UP systems. 10 * Intel IO-APIC support for SMP and UP systems.
11 * 11 *
@@ -147,6 +147,7 @@ struct io_apic_irq_attr;
147extern int io_apic_set_pci_routing(struct device *dev, int irq, 147extern int io_apic_set_pci_routing(struct device *dev, int irq,
148 struct io_apic_irq_attr *irq_attr); 148 struct io_apic_irq_attr *irq_attr);
149void setup_IO_APIC_irq_extra(u32 gsi); 149void setup_IO_APIC_irq_extra(u32 gsi);
150extern void ioapic_and_gsi_init(void);
150extern void ioapic_insert_resources(void); 151extern void ioapic_insert_resources(void);
151 152
152int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); 153int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
@@ -175,29 +176,12 @@ extern void mp_save_irq(struct mpc_intsrc *m);
175 176
176extern void disable_ioapic_support(void); 177extern void disable_ioapic_support(void);
177 178
178extern void __init native_io_apic_init_mappings(void);
179extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
180extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
181extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
182
183static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
184{
185 return x86_io_apic_ops.read(apic, reg);
186}
187
188static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
189{
190 x86_io_apic_ops.write(apic, reg, value);
191}
192static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
193{
194 x86_io_apic_ops.modify(apic, reg, value);
195}
196#else /* !CONFIG_X86_IO_APIC */ 179#else /* !CONFIG_X86_IO_APIC */
197 180
198#define io_apic_assign_pci_irqs 0 181#define io_apic_assign_pci_irqs 0
199#define setup_ioapic_ids_from_mpc x86_init_noop 182#define setup_ioapic_ids_from_mpc x86_init_noop
200static const int timer_through_8259 = 0; 183static const int timer_through_8259 = 0;
184static inline void ioapic_and_gsi_init(void) { }
201static inline void ioapic_insert_resources(void) { } 185static inline void ioapic_insert_resources(void) { }
202#define gsi_top (NR_IRQS_LEGACY) 186#define gsi_top (NR_IRQS_LEGACY)
203static inline int mp_find_ioapic(u32 gsi) { return 0; } 187static inline int mp_find_ioapic(u32 gsi) { return 0; }
@@ -219,10 +203,6 @@ static inline int restore_ioapic_entries(void)
219 203
220static inline void mp_save_irq(struct mpc_intsrc *m) { }; 204static inline void mp_save_irq(struct mpc_intsrc *m) { };
221static inline void disable_ioapic_support(void) { } 205static inline void disable_ioapic_support(void) { }
222#define native_io_apic_init_mappings NULL
223#define native_io_apic_read NULL
224#define native_io_apic_write NULL
225#define native_io_apic_modify NULL
226#endif 206#endif
227 207
228#endif /* _ASM_X86_IO_APIC_H */ 208#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 345c99cef15..dffc38ee625 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,6 +5,7 @@ extern struct dma_map_ops nommu_dma_ops;
5extern int force_iommu, no_iommu; 5extern int force_iommu, no_iommu;
6extern int iommu_detected; 6extern int iommu_detected;
7extern int iommu_pass_through; 7extern int iommu_pass_through;
8extern int iommu_group_mf;
8 9
9/* 10 seconds */ 10/* 10 seconds */
10#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) 11#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f42a04735a0..f229b13a5f3 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -48,7 +48,7 @@ struct iommu_table_entry {
48 48
49 49
50#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ 50#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
51 static const struct iommu_table_entry \ 51 static const struct iommu_table_entry const \
52 __iommu_entry_##_detect __used \ 52 __iommu_entry_##_detect __used \
53 __attribute__ ((unused, __section__(".iommu_table"), \ 53 __attribute__ ((unused, __section__(".iommu_table"), \
54 aligned((sizeof(void *))))) \ 54 aligned((sizeof(void *))))) \
@@ -63,10 +63,10 @@ struct iommu_table_entry {
63 * to stop detecting the other IOMMUs after yours has been detected. 63 * to stop detecting the other IOMMUs after yours has been detected.
64 */ 64 */
65#define IOMMU_INIT_POST(_detect) \ 65#define IOMMU_INIT_POST(_detect) \
66 __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0) 66 __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0)
67 67
68#define IOMMU_INIT_POST_FINISH(detect) \ 68#define IOMMU_INIT_POST_FINISH(detect) \
69 __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1) 69 __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1)
70 70
71/* 71/*
72 * A more sophisticated version of IOMMU_INIT. This variant requires: 72 * A more sophisticated version of IOMMU_INIT. This variant requires:
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index d82250b1deb..77843225b7e 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15 15
16static inline struct pt_regs *get_irq_regs(void) 16static inline struct pt_regs *get_irq_regs(void)
17{ 17{
18 return this_cpu_read(irq_regs); 18 return percpu_read(irq_regs);
19} 19}
20 20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) 21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
23 struct pt_regs *old_regs; 23 struct pt_regs *old_regs;
24 24
25 old_regs = get_irq_regs(); 25 old_regs = get_irq_regs();
26 this_cpu_write(irq_regs, new_regs); 26 percpu_write(irq_regs, new_regs);
27 27
28 return old_regs; 28 return old_regs;
29} 29}
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5fb9bbbd2f1..47d99934580 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,101 +1,45 @@
1/* 1#ifndef _ASM_X86_IRQ_REMAPPING_H
2 * Copyright (C) 2012 Advanced Micro Devices, Inc. 2#define _ASM_X86_IRQ_REMAPPING_H
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * This header file contains the interface of the interrupt remapping code to
19 * the x86 interrupt management code.
20 */
21 3
22#ifndef __X86_IRQ_REMAPPING_H 4#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
23#define __X86_IRQ_REMAPPING_H
24
25#include <asm/io_apic.h>
26 5
27#ifdef CONFIG_IRQ_REMAP 6#ifdef CONFIG_IRQ_REMAP
28 7static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
29extern int irq_remapping_enabled; 8static inline void prepare_irte(struct irte *irte, int vector,
30 9 unsigned int dest)
31extern void setup_irq_remapping_ops(void);
32extern int irq_remapping_supported(void);
33extern int irq_remapping_prepare(void);
34extern int irq_remapping_enable(void);
35extern void irq_remapping_disable(void);
36extern int irq_remapping_reenable(int);
37extern int irq_remap_enable_fault_handling(void);
38extern int setup_ioapic_remapped_entry(int irq,
39 struct IO_APIC_route_entry *entry,
40 unsigned int destination,
41 int vector,
42 struct io_apic_irq_attr *attr);
43extern int set_remapped_irq_affinity(struct irq_data *data,
44 const struct cpumask *mask,
45 bool force);
46extern void free_remapped_irq(int irq);
47extern void compose_remapped_msi_msg(struct pci_dev *pdev,
48 unsigned int irq, unsigned int dest,
49 struct msi_msg *msg, u8 hpet_id);
50extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
51extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
52 int index, int sub_handle);
53extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
54
55#else /* CONFIG_IRQ_REMAP */
56
57#define irq_remapping_enabled 0
58
59static inline void setup_irq_remapping_ops(void) { }
60static inline int irq_remapping_supported(void) { return 0; }
61static inline int irq_remapping_prepare(void) { return -ENODEV; }
62static inline int irq_remapping_enable(void) { return -ENODEV; }
63static inline void irq_remapping_disable(void) { }
64static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
65static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
66static inline int setup_ioapic_remapped_entry(int irq,
67 struct IO_APIC_route_entry *entry,
68 unsigned int destination,
69 int vector,
70 struct io_apic_irq_attr *attr)
71{
72 return -ENODEV;
73}
74static inline int set_remapped_irq_affinity(struct irq_data *data,
75 const struct cpumask *mask,
76 bool force)
77{ 10{
78 return 0; 11 memset(irte, 0, sizeof(*irte));
12
13 irte->present = 1;
14 irte->dst_mode = apic->irq_dest_mode;
15 /*
16 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
17 * actual level or edge trigger will be setup in the IO-APIC
18 * RTE. This will help simplify level triggered irq migration.
19 * For more details, see the comments (in io_apic.c) explainig IO-APIC
20 * irq migration in the presence of interrupt-remapping.
21 */
22 irte->trigger_mode = 0;
23 irte->dlvry_mode = apic->irq_delivery_mode;
24 irte->vector = vector;
25 irte->dest_id = IRTE_DEST(dest);
26 irte->redir_hint = 1;
79} 27}
80static inline void free_remapped_irq(int irq) { } 28static inline bool irq_remapped(struct irq_cfg *cfg)
81static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
82 unsigned int irq, unsigned int dest,
83 struct msi_msg *msg, u8 hpet_id)
84{ 29{
30 return cfg->irq_2_iommu.iommu != NULL;
85} 31}
86static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) 32#else
33static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
87{ 34{
88 return -ENODEV;
89} 35}
90static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, 36static inline bool irq_remapped(struct irq_cfg *cfg)
91 int index, int sub_handle)
92{ 37{
93 return -ENODEV; 38 return false;
94} 39}
95static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) 40static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
96{ 41{
97 return -ENODEV;
98} 42}
99#endif /* CONFIG_IRQ_REMAP */ 43#endif
100 44
101#endif /* __X86_IRQ_REMAPPING_H */ 45#endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 1508e518c7e..7e50f06393a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -119,6 +119,17 @@
119 */ 119 */
120#define LOCAL_TIMER_VECTOR 0xef 120#define LOCAL_TIMER_VECTOR 0xef
121 121
122/* up to 32 vectors used for spreading out TLB flushes: */
123#if NR_CPUS <= 32
124# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS)
125#else
126# define NUM_INVALIDATE_TLB_VECTORS (32)
127#endif
128
129#define INVALIDATE_TLB_VECTOR_END (0xee)
130#define INVALIDATE_TLB_VECTOR_START \
131 (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
132
122#define NR_VECTORS 256 133#define NR_VECTORS 256
123 134
124#define FPU_IRQ 13 135#define FPU_IRQ 13
@@ -149,11 +160,19 @@ static inline int invalid_vm86_irq(int irq)
149#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) 160#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
150 161
151#ifdef CONFIG_X86_IO_APIC 162#ifdef CONFIG_X86_IO_APIC
152# define CPU_VECTOR_LIMIT (64 * NR_CPUS) 163# ifdef CONFIG_SPARSE_IRQ
153# define NR_IRQS \ 164# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
165# define NR_IRQS \
154 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ 166 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
155 (NR_VECTORS + CPU_VECTOR_LIMIT) : \ 167 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
156 (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) 168 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
169# else
170# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
171# define NR_IRQS \
172 (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
173 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
174 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
175# endif
157#else /* !CONFIG_X86_IO_APIC: */ 176#else /* !CONFIG_X86_IO_APIC: */
158# define NR_IRQS NR_IRQS_LEGACY 177# define NR_IRQS NR_IRQS_LEGACY
159#endif 178#endif
diff --git a/arch/x86/include/asm/ist.h b/arch/x86/include/asm/ist.h
index c9803f1a203..7e5dff1de0e 100644
--- a/arch/x86/include/asm/ist.h
+++ b/arch/x86/include/asm/ist.h
@@ -1,3 +1,6 @@
1#ifndef _ASM_X86_IST_H
2#define _ASM_X86_IST_H
3
1/* 4/*
2 * Include file for the interface to IST BIOS 5 * Include file for the interface to IST BIOS
3 * Copyright 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright 2002 Andy Grover <andrew.grover@intel.com>
@@ -12,12 +15,20 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details. 16 * General Public License for more details.
14 */ 17 */
15#ifndef _ASM_X86_IST_H
16#define _ASM_X86_IST_H
17 18
18#include <uapi/asm/ist.h>
19 19
20#include <linux/types.h>
21
22struct ist_info {
23 __u32 signature;
24 __u32 command;
25 __u32 event;
26 __u32 perf_level;
27};
28
29#ifdef __KERNEL__
20 30
21extern struct ist_info ist_info; 31extern struct ist_info ist_info;
22 32
33#endif /* __KERNEL__ */
23#endif /* _ASM_X86_IST_H */ 34#endif /* _ASM_X86_IST_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3a16c1483b4..a32b18ce6ea 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -9,12 +9,12 @@
9 9
10#define JUMP_LABEL_NOP_SIZE 5 10#define JUMP_LABEL_NOP_SIZE 5
11 11
12#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" 12#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
13 13
14static __always_inline bool arch_static_branch(struct static_key *key) 14static __always_inline bool arch_static_branch(struct jump_label_key *key)
15{ 15{
16 asm goto("1:" 16 asm goto("1:"
17 STATIC_KEY_INITIAL_NOP 17 JUMP_LABEL_INITIAL_NOP
18 ".pushsection __jump_table, \"aw\" \n\t" 18 ".pushsection __jump_table, \"aw\" \n\t"
19 _ASM_ALIGN "\n\t" 19 _ASM_ALIGN "\n\t"
20 _ASM_PTR "1b, %l[l_yes], %c0 \n\t" 20 _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
diff --git a/arch/x86/include/asm/kbdleds.h b/arch/x86/include/asm/kbdleds.h
deleted file mode 100644
index f27ac5ff597..00000000000
--- a/arch/x86/include/asm/kbdleds.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_X86_KBDLEDS_H
2#define _ASM_X86_KBDLEDS_H
3
4/*
5 * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
6 * This seems a good reason to start with NumLock off. That's why on X86 we
7 * ask the bios for the correct state.
8 */
9
10#include <asm/setup.h>
11
12static inline int kbd_defleds(void)
13{
14 return boot_params.kbd_status & 0x20 ? (1 << VC_NUMLOCK) : 0;
15}
16
17#endif /* _ASM_X86_KBDLEDS_H */
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 2c37aadcbc3..d73f1571bde 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -24,6 +24,7 @@ enum die_val {
24extern void printk_address(unsigned long address, int reliable); 24extern void printk_address(unsigned long address, int reliable);
25extern void die(const char *, struct pt_regs *,long); 25extern void die(const char *, struct pt_regs *,long);
26extern int __must_check __die(const char *, struct pt_regs *, long); 26extern int __must_check __die(const char *, struct pt_regs *, long);
27extern void show_registers(struct pt_regs *regs);
27extern void show_trace(struct task_struct *t, struct pt_regs *regs, 28extern void show_trace(struct task_struct *t, struct pt_regs *regs,
28 unsigned long *sp, unsigned long bp); 29 unsigned long *sp, unsigned long bp);
29extern void __show_regs(struct pt_regs *regs, int all); 30extern void __show_regs(struct pt_regs *regs, int all);
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 6080d2694ba..317ff1703d0 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -163,9 +163,6 @@ struct kimage_arch {
163}; 163};
164#endif 164#endif
165 165
166typedef void crash_vmclear_fn(void);
167extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
168
169#endif /* __ASSEMBLY__ */ 166#endif /* __ASSEMBLY__ */
170 167
171#endif /* _ASM_X86_KEXEC_H */ 168#endif /* _ASM_X86_KEXEC_H */
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 332f98c9111..77e95f54570 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -64,15 +64,11 @@ enum regnames {
64 GDB_PS, /* 17 */ 64 GDB_PS, /* 17 */
65 GDB_CS, /* 18 */ 65 GDB_CS, /* 18 */
66 GDB_SS, /* 19 */ 66 GDB_SS, /* 19 */
67 GDB_DS, /* 20 */
68 GDB_ES, /* 21 */
69 GDB_FS, /* 22 */
70 GDB_GS, /* 23 */
71}; 67};
72#define GDB_ORIG_AX 57 68#define GDB_ORIG_AX 57
73#define DBG_MAX_REG_NUM 24 69#define DBG_MAX_REG_NUM 20
74/* 17 64 bit regs and 5 32 bit regs */ 70/* 17 64 bit regs and 3 32 bit regs */
75#define NUMREGBYTES ((17 * 8) + (5 * 4)) 71#define NUMREGBYTES ((17 * 8) + (3 * 4))
76#endif /* ! CONFIG_X86_32 */ 72#endif /* ! CONFIG_X86_32 */
77 73
78static inline void arch_kgdb_breakpoint(void) 74static inline void arch_kgdb_breakpoint(void)
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d..54788253915 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,7 +27,6 @@
27#include <asm/insn.h> 27#include <asm/insn.h>
28 28
29#define __ARCH_WANT_KPROBES_INSN_SLOT 29#define __ARCH_WANT_KPROBES_INSN_SLOT
30#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
31 30
32struct pt_regs; 31struct pt_regs;
33struct kprobe; 32struct kprobe;
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 15f960c06ff..6040d115ef5 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -86,19 +86,6 @@ struct x86_instruction_info {
86 86
87struct x86_emulate_ops { 87struct x86_emulate_ops {
88 /* 88 /*
89 * read_gpr: read a general purpose register (rax - r15)
90 *
91 * @reg: gpr number.
92 */
93 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
94 /*
95 * write_gpr: write a general purpose register (rax - r15)
96 *
97 * @reg: gpr number.
98 * @val: value to write.
99 */
100 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
101 /*
102 * read_std: Read bytes of standard (non-emulated/special) memory. 89 * read_std: Read bytes of standard (non-emulated/special) memory.
103 * Used for descriptor reading. 90 * Used for descriptor reading.
104 * @addr: [IN ] Linear address from which to read. 91 * @addr: [IN ] Linear address from which to read.
@@ -189,13 +176,11 @@ struct x86_emulate_ops {
189 void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt); 176 void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
190 ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); 177 ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
191 int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); 178 int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
192 void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
193 int (*cpl)(struct x86_emulate_ctxt *ctxt); 179 int (*cpl)(struct x86_emulate_ctxt *ctxt);
194 int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); 180 int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
195 int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); 181 int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
196 int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); 182 int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
197 int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); 183 int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
198 int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
199 void (*halt)(struct x86_emulate_ctxt *ctxt); 184 void (*halt)(struct x86_emulate_ctxt *ctxt);
200 void (*wbinvd)(struct x86_emulate_ctxt *ctxt); 185 void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
201 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt); 186 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -204,18 +189,14 @@ struct x86_emulate_ops {
204 int (*intercept)(struct x86_emulate_ctxt *ctxt, 189 int (*intercept)(struct x86_emulate_ctxt *ctxt,
205 struct x86_instruction_info *info, 190 struct x86_instruction_info *info,
206 enum x86_intercept_stage stage); 191 enum x86_intercept_stage stage);
207
208 void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
209 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
210}; 192};
211 193
212typedef u32 __attribute__((vector_size(16))) sse128_t; 194typedef u32 __attribute__((vector_size(16))) sse128_t;
213 195
214/* Type, address-of, and value of an instruction's operand. */ 196/* Type, address-of, and value of an instruction's operand. */
215struct operand { 197struct operand {
216 enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type; 198 enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
217 unsigned int bytes; 199 unsigned int bytes;
218 unsigned int count;
219 union { 200 union {
220 unsigned long orig_val; 201 unsigned long orig_val;
221 u64 orig_val64; 202 u64 orig_val64;
@@ -227,15 +208,12 @@ struct operand {
227 unsigned seg; 208 unsigned seg;
228 } mem; 209 } mem;
229 unsigned xmm; 210 unsigned xmm;
230 unsigned mm;
231 } addr; 211 } addr;
232 union { 212 union {
233 unsigned long val; 213 unsigned long val;
234 u64 val64; 214 u64 val64;
235 char valptr[sizeof(unsigned long) + 2]; 215 char valptr[sizeof(unsigned long) + 2];
236 sse128_t vec_val; 216 sse128_t vec_val;
237 u64 mm_val;
238 void *data;
239 }; 217 };
240}; 218};
241 219
@@ -251,23 +229,14 @@ struct read_cache {
251 unsigned long end; 229 unsigned long end;
252}; 230};
253 231
254/* Execution mode, passed to the emulator. */
255enum x86emul_mode {
256 X86EMUL_MODE_REAL, /* Real mode. */
257 X86EMUL_MODE_VM86, /* Virtual 8086 mode. */
258 X86EMUL_MODE_PROT16, /* 16-bit protected mode. */
259 X86EMUL_MODE_PROT32, /* 32-bit protected mode. */
260 X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */
261};
262
263struct x86_emulate_ctxt { 232struct x86_emulate_ctxt {
264 const struct x86_emulate_ops *ops; 233 struct x86_emulate_ops *ops;
265 234
266 /* Register state before/after emulation. */ 235 /* Register state before/after emulation. */
267 unsigned long eflags; 236 unsigned long eflags;
268 unsigned long eip; /* eip before instruction emulation */ 237 unsigned long eip; /* eip before instruction emulation */
269 /* Emulated execution mode, represented by an X86EMUL_MODE value. */ 238 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
270 enum x86emul_mode mode; 239 int mode;
271 240
272 /* interruptibility state, as a result of execution of STI or MOV SS */ 241 /* interruptibility state, as a result of execution of STI or MOV SS */
273 int interruptibility; 242 int interruptibility;
@@ -293,7 +262,7 @@ struct x86_emulate_ctxt {
293 struct operand dst; 262 struct operand dst;
294 bool has_seg_override; 263 bool has_seg_override;
295 u8 seg_override; 264 u8 seg_override;
296 u64 d; 265 unsigned int d;
297 int (*execute)(struct x86_emulate_ctxt *ctxt); 266 int (*execute)(struct x86_emulate_ctxt *ctxt);
298 int (*check_perm)(struct x86_emulate_ctxt *ctxt); 267 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
299 /* modrm */ 268 /* modrm */
@@ -304,12 +273,8 @@ struct x86_emulate_ctxt {
304 u8 modrm_seg; 273 u8 modrm_seg;
305 bool rip_relative; 274 bool rip_relative;
306 unsigned long _eip; 275 unsigned long _eip;
307 struct operand memop;
308 u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */
309 u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */
310 /* Fields above regs are cleared together. */ 276 /* Fields above regs are cleared together. */
311 unsigned long _regs[NR_VCPU_REGS]; 277 unsigned long regs[NR_VCPU_REGS];
312 struct operand *memopp;
313 struct fetch_cache fetch; 278 struct fetch_cache fetch;
314 struct read_cache io_read; 279 struct read_cache io_read;
315 struct read_cache mem_read; 280 struct read_cache mem_read;
@@ -319,18 +284,16 @@ struct x86_emulate_ctxt {
319#define REPE_PREFIX 0xf3 284#define REPE_PREFIX 0xf3
320#define REPNE_PREFIX 0xf2 285#define REPNE_PREFIX 0xf2
321 286
322/* CPUID vendors */ 287/* Execution mode, passed to the emulator. */
323#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 288#define X86EMUL_MODE_REAL 0 /* Real mode. */
324#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 289#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
325#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 290#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
326 291#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
327#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41 292#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
328#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
329#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
330 293
331#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 294/* any protected mode */
332#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e 295#define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
333#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 296 X86EMUL_MODE_PROT64)
334 297
335enum x86_intercept_stage { 298enum x86_intercept_stage {
336 X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ 299 X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
@@ -399,17 +362,13 @@ enum x86_intercept {
399#endif 362#endif
400 363
401int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len); 364int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
402bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
403#define EMULATION_FAILED -1 365#define EMULATION_FAILED -1
404#define EMULATION_OK 0 366#define EMULATION_OK 0
405#define EMULATION_RESTART 1 367#define EMULATION_RESTART 1
406#define EMULATION_INTERCEPTED 2 368#define EMULATION_INTERCEPTED 2
407int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); 369int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
408int emulator_task_switch(struct x86_emulate_ctxt *ctxt, 370int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
409 u16 tss_selector, int idt_index, int reason, 371 u16 tss_selector, int reason,
410 bool has_error_code, u32 error_code); 372 bool has_error_code, u32 error_code);
411int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); 373int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
412void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
413void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
414
415#endif /* _ASM_X86_KVM_X86_EMULATE_H */ 374#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
deleted file mode 100644
index a92b1763c41..00000000000
--- a/arch/x86/include/asm/kvm_guest.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_X86_KVM_GUEST_H
2#define _ASM_X86_KVM_GUEST_H
3
4int kvm_setup_vsyscall_timeinfo(void);
5
6#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dc87b65e9c3..dd51c83aa5d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -16,28 +16,20 @@
16#include <linux/mmu_notifier.h> 16#include <linux/mmu_notifier.h>
17#include <linux/tracepoint.h> 17#include <linux/tracepoint.h>
18#include <linux/cpumask.h> 18#include <linux/cpumask.h>
19#include <linux/irq_work.h>
20 19
21#include <linux/kvm.h> 20#include <linux/kvm.h>
22#include <linux/kvm_para.h> 21#include <linux/kvm_para.h>
23#include <linux/kvm_types.h> 22#include <linux/kvm_types.h>
24#include <linux/perf_event.h>
25#include <linux/pvclock_gtod.h>
26#include <linux/clocksource.h>
27 23
28#include <asm/pvclock-abi.h> 24#include <asm/pvclock-abi.h>
29#include <asm/desc.h> 25#include <asm/desc.h>
30#include <asm/mtrr.h> 26#include <asm/mtrr.h>
31#include <asm/msr-index.h> 27#include <asm/msr-index.h>
32#include <asm/asm.h>
33 28
34#define KVM_MAX_VCPUS 254 29#define KVM_MAX_VCPUS 64
35#define KVM_SOFT_MAX_VCPUS 160
36#define KVM_MEMORY_SLOTS 32 30#define KVM_MEMORY_SLOTS 32
37/* memory slots that does not exposed to userspace */ 31/* memory slots that does not exposed to userspace */
38#define KVM_PRIVATE_MEM_SLOTS 4 32#define KVM_PRIVATE_MEM_SLOTS 4
39#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
40
41#define KVM_MMIO_SIZE 16 33#define KVM_MMIO_SIZE 16
42 34
43#define KVM_PIO_PAGE_OFFSET 1 35#define KVM_PIO_PAGE_OFFSET 1
@@ -50,13 +42,12 @@
50 42
51#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) 43#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
52#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) 44#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
53#define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL
54#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 45#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
55 0xFFFFFF0000000000ULL) 46 0xFFFFFF0000000000ULL)
56#define CR4_RESERVED_BITS \ 47#define CR4_RESERVED_BITS \
57 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ 48 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
58 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ 49 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
59 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ 50 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
60 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \ 51 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
61 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) 52 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
62 53
@@ -77,6 +68,22 @@
77#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) 68#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
78#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) 69#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
79 70
71#define DE_VECTOR 0
72#define DB_VECTOR 1
73#define BP_VECTOR 3
74#define OF_VECTOR 4
75#define BR_VECTOR 5
76#define UD_VECTOR 6
77#define NM_VECTOR 7
78#define DF_VECTOR 8
79#define TS_VECTOR 10
80#define NP_VECTOR 11
81#define SS_VECTOR 12
82#define GP_VECTOR 13
83#define PF_VECTOR 14
84#define MF_VECTOR 16
85#define MC_VECTOR 18
86
80#define SELECTOR_TI_MASK (1 << 2) 87#define SELECTOR_TI_MASK (1 << 2)
81#define SELECTOR_RPL_MASK 0x03 88#define SELECTOR_RPL_MASK 0x03
82 89
@@ -160,16 +167,6 @@ enum {
160#define DR7_FIXED_1 0x00000400 167#define DR7_FIXED_1 0x00000400
161#define DR7_VOLATILE 0xffff23ff 168#define DR7_VOLATILE 0xffff23ff
162 169
163/* apic attention bits */
164#define KVM_APIC_CHECK_VAPIC 0
165/*
166 * The following bit is set with PV-EOI, unset on EOI.
167 * We detect PV-EOI changes by guest by comparing
168 * this bit with PV-EOI in guest memory.
169 * See the implementation in apic_update_pv_eoi.
170 */
171#define KVM_APIC_PV_EOI_PENDING 1
172
173/* 170/*
174 * We don't want allocation failures within the mmu code, so we preallocate 171 * We don't want allocation failures within the mmu code, so we preallocate
175 * enough memory for a single page fault in a cache. 172 * enough memory for a single page fault in a cache.
@@ -179,6 +176,13 @@ struct kvm_mmu_memory_cache {
179 void *objects[KVM_NR_MEM_OBJS]; 176 void *objects[KVM_NR_MEM_OBJS];
180}; 177};
181 178
179#define NR_PTE_CHAIN_ENTRIES 5
180
181struct kvm_pte_chain {
182 u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
183 struct hlist_node link;
184};
185
182/* 186/*
183 * kvm_mmu_page_role, below, is defined as: 187 * kvm_mmu_page_role, below, is defined as:
184 * 188 *
@@ -223,7 +227,7 @@ struct kvm_mmu_page {
223 * One bit set per slot which has memory 227 * One bit set per slot which has memory
224 * in this shadow page. 228 * in this shadow page.
225 */ 229 */
226 DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM); 230 DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
227 bool unsync; 231 bool unsync;
228 int root_count; /* Currently serving as active root */ 232 int root_count; /* Currently serving as active root */
229 unsigned int unsync_children; 233 unsigned int unsync_children;
@@ -234,7 +238,14 @@ struct kvm_mmu_page {
234 int clear_spte_count; 238 int clear_spte_count;
235#endif 239#endif
236 240
237 int write_flooding_count; 241 struct rcu_head rcu;
242};
243
244struct kvm_pv_mmu_op_buffer {
245 void *ptr;
246 unsigned len;
247 unsigned processed;
248 char buf[512] __aligned(sizeof(long));
238}; 249};
239 250
240struct kvm_pio_request { 251struct kvm_pio_request {
@@ -253,7 +264,6 @@ struct kvm_mmu {
253 void (*new_cr3)(struct kvm_vcpu *vcpu); 264 void (*new_cr3)(struct kvm_vcpu *vcpu);
254 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); 265 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
255 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu); 266 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
256 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
257 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, 267 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
258 bool prefault); 268 bool prefault);
259 void (*inject_page_fault)(struct kvm_vcpu *vcpu, 269 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
@@ -273,60 +283,15 @@ struct kvm_mmu {
273 union kvm_mmu_page_role base_role; 283 union kvm_mmu_page_role base_role;
274 bool direct_map; 284 bool direct_map;
275 285
276 /*
277 * Bitmap; bit set = permission fault
278 * Byte index: page fault error code [4:1]
279 * Bit index: pte permissions in ACC_* format
280 */
281 u8 permissions[16];
282
283 u64 *pae_root; 286 u64 *pae_root;
284 u64 *lm_root; 287 u64 *lm_root;
285 u64 rsvd_bits_mask[2][4]; 288 u64 rsvd_bits_mask[2][4];
286 289
287 /*
288 * Bitmap: bit set = last pte in walk
289 * index[0:1]: level (zero-based)
290 * index[2]: pte.ps
291 */
292 u8 last_pte_bitmap;
293
294 bool nx; 290 bool nx;
295 291
296 u64 pdptrs[4]; /* pae */ 292 u64 pdptrs[4]; /* pae */
297}; 293};
298 294
299enum pmc_type {
300 KVM_PMC_GP = 0,
301 KVM_PMC_FIXED,
302};
303
304struct kvm_pmc {
305 enum pmc_type type;
306 u8 idx;
307 u64 counter;
308 u64 eventsel;
309 struct perf_event *perf_event;
310 struct kvm_vcpu *vcpu;
311};
312
313struct kvm_pmu {
314 unsigned nr_arch_gp_counters;
315 unsigned nr_arch_fixed_counters;
316 unsigned available_event_types;
317 u64 fixed_ctr_ctrl;
318 u64 global_ctrl;
319 u64 global_status;
320 u64 global_ovf_ctrl;
321 u64 counter_bitmask[2];
322 u64 global_ctrl_mask;
323 u8 version;
324 struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
325 struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
326 struct irq_work irq_work;
327 u64 reprogram_pmi;
328};
329
330struct kvm_vcpu_arch { 295struct kvm_vcpu_arch {
331 /* 296 /*
332 * rip and regs accesses must go through 297 * rip and regs accesses must go through
@@ -347,7 +312,6 @@ struct kvm_vcpu_arch {
347 u64 efer; 312 u64 efer;
348 u64 apic_base; 313 u64 apic_base;
349 struct kvm_lapic *apic; /* kernel irqchip context */ 314 struct kvm_lapic *apic; /* kernel irqchip context */
350 unsigned long apic_attention;
351 int32_t apic_arb_prio; 315 int32_t apic_arb_prio;
352 int mp_state; 316 int mp_state;
353 int sipi_vector; 317 int sipi_vector;
@@ -379,10 +343,19 @@ struct kvm_vcpu_arch {
379 */ 343 */
380 struct kvm_mmu *walk_mmu; 344 struct kvm_mmu *walk_mmu;
381 345
346 /* only needed in kvm_pv_mmu_op() path, but it's hot so
347 * put it here to avoid allocation */
348 struct kvm_pv_mmu_op_buffer mmu_op_buffer;
349
382 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; 350 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
383 struct kvm_mmu_memory_cache mmu_page_cache; 351 struct kvm_mmu_memory_cache mmu_page_cache;
384 struct kvm_mmu_memory_cache mmu_page_header_cache; 352 struct kvm_mmu_memory_cache mmu_page_header_cache;
385 353
354 gfn_t last_pt_write_gfn;
355 int last_pt_write_count;
356 u64 *last_pte_updated;
357 gfn_t last_pte_gfn;
358
386 struct fpu guest_fpu; 359 struct fpu guest_fpu;
387 u64 xcr0; 360 u64 xcr0;
388 361
@@ -414,15 +387,12 @@ struct kvm_vcpu_arch {
414 struct x86_emulate_ctxt emulate_ctxt; 387 struct x86_emulate_ctxt emulate_ctxt;
415 bool emulate_regs_need_sync_to_vcpu; 388 bool emulate_regs_need_sync_to_vcpu;
416 bool emulate_regs_need_sync_from_vcpu; 389 bool emulate_regs_need_sync_from_vcpu;
417 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
418 390
419 gpa_t time; 391 gpa_t time;
420 struct pvclock_vcpu_time_info hv_clock; 392 struct pvclock_vcpu_time_info hv_clock;
421 unsigned int hw_tsc_khz; 393 unsigned int hw_tsc_khz;
422 unsigned int time_offset; 394 unsigned int time_offset;
423 struct page *time_page; 395 struct page *time_page;
424 /* set guest stopped flag in pvclock flags field */
425 bool pvclock_set_guest_stopped_request;
426 396
427 struct { 397 struct {
428 u64 msr_val; 398 u64 msr_val;
@@ -434,21 +404,15 @@ struct kvm_vcpu_arch {
434 404
435 u64 last_guest_tsc; 405 u64 last_guest_tsc;
436 u64 last_kernel_ns; 406 u64 last_kernel_ns;
437 u64 last_host_tsc; 407 u64 last_tsc_nsec;
438 u64 tsc_offset_adjustment; 408 u64 last_tsc_write;
439 u64 this_tsc_nsec;
440 u64 this_tsc_write;
441 u8 this_tsc_generation;
442 bool tsc_catchup;
443 bool tsc_always_catchup;
444 s8 virtual_tsc_shift;
445 u32 virtual_tsc_mult;
446 u32 virtual_tsc_khz; 409 u32 virtual_tsc_khz;
447 s64 ia32_tsc_adjust_msr; 410 bool tsc_catchup;
411 u32 tsc_catchup_mult;
412 s8 tsc_catchup_shift;
448 413
449 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */ 414 bool nmi_pending;
450 unsigned nmi_pending; /* NMI queued after currently running handler */ 415 bool nmi_injected;
451 bool nmi_injected; /* Trying to inject an NMI this entry */
452 416
453 struct mtrr_state_type mtrr_state; 417 struct mtrr_state_type mtrr_state;
454 u32 pat; 418 u32 pat;
@@ -458,7 +422,6 @@ struct kvm_vcpu_arch {
458 unsigned long dr6; 422 unsigned long dr6;
459 unsigned long dr7; 423 unsigned long dr7;
460 unsigned long eff_db[KVM_NR_DB_REGS]; 424 unsigned long eff_db[KVM_NR_DB_REGS];
461 unsigned long guest_debug_dr7;
462 425
463 u64 mcg_cap; 426 u64 mcg_cap;
464 u64 mcg_status; 427 u64 mcg_status;
@@ -470,8 +433,6 @@ struct kvm_vcpu_arch {
470 unsigned access; 433 unsigned access;
471 gfn_t mmio_gfn; 434 gfn_t mmio_gfn;
472 435
473 struct kvm_pmu pmu;
474
475 /* used for guest single stepping over the given code position */ 436 /* used for guest single stepping over the given code position */
476 unsigned long singlestep_rip; 437 unsigned long singlestep_rip;
477 438
@@ -480,9 +441,6 @@ struct kvm_vcpu_arch {
480 441
481 cpumask_var_t wbinvd_dirty_mask; 442 cpumask_var_t wbinvd_dirty_mask;
482 443
483 unsigned long last_retry_eip;
484 unsigned long last_retry_addr;
485
486 struct { 444 struct {
487 bool halted; 445 bool halted;
488 gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)]; 446 gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -491,36 +449,6 @@ struct kvm_vcpu_arch {
491 u32 id; 449 u32 id;
492 bool send_user_only; 450 bool send_user_only;
493 } apf; 451 } apf;
494
495 /* OSVW MSRs (AMD only) */
496 struct {
497 u64 length;
498 u64 status;
499 } osvw;
500
501 struct {
502 u64 msr_val;
503 struct gfn_to_hva_cache data;
504 } pv_eoi;
505};
506
507struct kvm_lpage_info {
508 int write_count;
509};
510
511struct kvm_arch_memory_slot {
512 unsigned long *rmap[KVM_NR_PAGE_SIZES];
513 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
514};
515
516struct kvm_apic_map {
517 struct rcu_head rcu;
518 u8 ldr_bits;
519 /* fields bellow are used to decode ldr values in different modes */
520 u32 cid_shift, cid_mask, lid_mask;
521 struct kvm_lapic *phys_map[256];
522 /* first index is cluster id second is cpu id in a cluster */
523 struct kvm_lapic *logical_map[16][16];
524}; 452};
525 453
526struct kvm_arch { 454struct kvm_arch {
@@ -528,6 +456,7 @@ struct kvm_arch {
528 unsigned int n_requested_mmu_pages; 456 unsigned int n_requested_mmu_pages;
529 unsigned int n_max_mmu_pages; 457 unsigned int n_max_mmu_pages;
530 unsigned int indirect_shadow_pages; 458 unsigned int indirect_shadow_pages;
459 atomic_t invlpg_counter;
531 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 460 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
532 /* 461 /*
533 * Hash table of struct kvm_mmu_page. 462 * Hash table of struct kvm_mmu_page.
@@ -540,8 +469,6 @@ struct kvm_arch {
540 struct kvm_ioapic *vioapic; 469 struct kvm_ioapic *vioapic;
541 struct kvm_pit *vpit; 470 struct kvm_pit *vpit;
542 int vapics_in_nmi_mode; 471 int vapics_in_nmi_mode;
543 struct mutex apic_map_lock;
544 struct kvm_apic_map *apic_map;
545 472
546 unsigned int tss_addr; 473 unsigned int tss_addr;
547 struct page *apic_access_page; 474 struct page *apic_access_page;
@@ -556,18 +483,8 @@ struct kvm_arch {
556 s64 kvmclock_offset; 483 s64 kvmclock_offset;
557 raw_spinlock_t tsc_write_lock; 484 raw_spinlock_t tsc_write_lock;
558 u64 last_tsc_nsec; 485 u64 last_tsc_nsec;
486 u64 last_tsc_offset;
559 u64 last_tsc_write; 487 u64 last_tsc_write;
560 u32 last_tsc_khz;
561 u64 cur_tsc_nsec;
562 u64 cur_tsc_write;
563 u64 cur_tsc_offset;
564 u8 cur_tsc_generation;
565 int nr_vcpus_matched_tsc;
566
567 spinlock_t pvclock_gtod_sync_lock;
568 bool use_master_clock;
569 u64 master_kernel_ns;
570 cycle_t master_cycle_now;
571 488
572 struct kvm_xen_hvm_config xen_hvm_config; 489 struct kvm_xen_hvm_config xen_hvm_config;
573 490
@@ -575,6 +492,8 @@ struct kvm_arch {
575 u64 hv_guest_os_id; 492 u64 hv_guest_os_id;
576 u64 hv_hypercall; 493 u64 hv_hypercall;
577 494
495 atomic_t reader_counter;
496
578 #ifdef CONFIG_KVM_MMU_AUDIT 497 #ifdef CONFIG_KVM_MMU_AUDIT
579 int audit_point; 498 int audit_point;
580 #endif 499 #endif
@@ -621,12 +540,6 @@ struct kvm_vcpu_stat {
621 540
622struct x86_instruction_info; 541struct x86_instruction_info;
623 542
624struct msr_data {
625 bool host_initiated;
626 u32 index;
627 u64 data;
628};
629
630struct kvm_x86_ops { 543struct kvm_x86_ops {
631 int (*cpu_has_kvm_support)(void); /* __init */ 544 int (*cpu_has_kvm_support)(void); /* __init */
632 int (*disabled_by_bios)(void); /* __init */ 545 int (*disabled_by_bios)(void); /* __init */
@@ -647,9 +560,10 @@ struct kvm_x86_ops {
647 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); 560 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
648 void (*vcpu_put)(struct kvm_vcpu *vcpu); 561 void (*vcpu_put)(struct kvm_vcpu *vcpu);
649 562
650 void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); 563 void (*set_guest_debug)(struct kvm_vcpu *vcpu,
564 struct kvm_guest_debug *dbg);
651 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); 565 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
652 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); 566 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
653 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 567 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
654 void (*get_segment)(struct kvm_vcpu *vcpu, 568 void (*get_segment)(struct kvm_vcpu *vcpu,
655 struct kvm_segment *var, int seg); 569 struct kvm_segment *var, int seg);
@@ -702,8 +616,7 @@ struct kvm_x86_ops {
702 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); 616 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
703 int (*get_lpage_level)(void); 617 int (*get_lpage_level)(void);
704 bool (*rdtscp_supported)(void); 618 bool (*rdtscp_supported)(void);
705 bool (*invpcid_supported)(void); 619 void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
706 void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
707 620
708 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); 621 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
709 622
@@ -711,18 +624,18 @@ struct kvm_x86_ops {
711 624
712 bool (*has_wbinvd_exit)(void); 625 bool (*has_wbinvd_exit)(void);
713 626
714 void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale); 627 void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
715 u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
716 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 628 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
717 629
718 u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc); 630 u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
719 u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
720 631
721 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 632 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
722 633
723 int (*check_intercept)(struct kvm_vcpu *vcpu, 634 int (*check_intercept)(struct kvm_vcpu *vcpu,
724 struct x86_instruction_info *info, 635 struct x86_instruction_info *info,
725 enum x86_intercept_stage stage); 636 enum x86_intercept_stage stage);
637
638 const struct trace_print_flags *exit_reasons_str;
726}; 639};
727 640
728struct kvm_arch_async_pf { 641struct kvm_arch_async_pf {
@@ -734,17 +647,6 @@ struct kvm_arch_async_pf {
734 647
735extern struct kvm_x86_ops *kvm_x86_ops; 648extern struct kvm_x86_ops *kvm_x86_ops;
736 649
737static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
738 s64 adjustment)
739{
740 kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
741}
742
743static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
744{
745 kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
746}
747
748int kvm_mmu_module_init(void); 650int kvm_mmu_module_init(void);
749void kvm_mmu_module_exit(void); 651void kvm_mmu_module_exit(void);
750 652
@@ -756,9 +658,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
756 658
757int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); 659int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
758void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); 660void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
759void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
760 struct kvm_memory_slot *slot,
761 gfn_t gfn_offset, unsigned long mask);
762void kvm_mmu_zap_all(struct kvm *kvm); 661void kvm_mmu_zap_all(struct kvm *kvm);
763unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 662unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
764void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 663void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -767,12 +666,12 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
767 666
768int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 667int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
769 const void *val, int bytes); 668 const void *val, int bytes);
669int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
670 gpa_t addr, unsigned long *ret);
770u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); 671u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
771 672
772extern bool tdp_enabled; 673extern bool tdp_enabled;
773 674
774u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
775
776/* control of guest tsc rate supported? */ 675/* control of guest tsc rate supported? */
777extern bool kvm_has_tsc_control; 676extern bool kvm_has_tsc_control;
778/* minimum supported tsc_khz for guests */ 677/* minimum supported tsc_khz for guests */
@@ -789,7 +688,6 @@ enum emulation_result {
789#define EMULTYPE_NO_DECODE (1 << 0) 688#define EMULTYPE_NO_DECODE (1 << 0)
790#define EMULTYPE_TRAP_UD (1 << 1) 689#define EMULTYPE_TRAP_UD (1 << 1)
791#define EMULTYPE_SKIP (1 << 2) 690#define EMULTYPE_SKIP (1 << 2)
792#define EMULTYPE_RETRY (1 << 3)
793int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, 691int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
794 int emulation_type, void *insn, int insn_len); 692 int emulation_type, void *insn, int insn_len);
795 693
@@ -801,7 +699,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
801 699
802void kvm_enable_efer_bits(u64); 700void kvm_enable_efer_bits(u64);
803int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); 701int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
804int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr); 702int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
805 703
806struct x86_emulate_ctxt; 704struct x86_emulate_ctxt;
807 705
@@ -813,8 +711,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
813void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); 711void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
814int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); 712int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
815 713
816int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 714int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
817 int reason, bool has_error_code, u32 error_code); 715 bool has_error_code, u32 error_code);
818 716
819int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 717int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
820int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); 718int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -828,11 +726,10 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
828int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); 726int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
829 727
830int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 728int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
831int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); 729int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
832 730
833unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); 731unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
834void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); 732void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
835bool kvm_rdpmc(struct kvm_vcpu *vcpu);
836 733
837void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); 734void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
838void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); 735void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -845,20 +742,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
845void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); 742void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
846bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); 743bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
847 744
848static inline int __kvm_irq_line_state(unsigned long *irq_state, 745int kvm_pic_set_irq(void *opaque, int irq, int level);
849 int irq_source_id, int level)
850{
851 /* Logical OR for level trig interrupt */
852 if (level)
853 __set_bit(irq_source_id, irq_state);
854 else
855 __clear_bit(irq_source_id, irq_state);
856
857 return !!(*irq_state);
858}
859
860int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
861void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
862 746
863void kvm_inject_nmi(struct kvm_vcpu *vcpu); 747void kvm_inject_nmi(struct kvm_vcpu *vcpu);
864 748
@@ -866,14 +750,13 @@ int fx_init(struct kvm_vcpu *vcpu);
866 750
867void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); 751void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
868void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 752void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
869 const u8 *new, int bytes); 753 const u8 *new, int bytes,
870int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); 754 bool guest_initiated);
871int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); 755int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
872void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 756void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
873int kvm_mmu_load(struct kvm_vcpu *vcpu); 757int kvm_mmu_load(struct kvm_vcpu *vcpu);
874void kvm_mmu_unload(struct kvm_vcpu *vcpu); 758void kvm_mmu_unload(struct kvm_vcpu *vcpu);
875void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); 759void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
876gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
877gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 760gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
878 struct x86_exception *exception); 761 struct x86_exception *exception);
879gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 762gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -895,11 +778,6 @@ void kvm_disable_tdp(void);
895int complete_pio(struct kvm_vcpu *vcpu); 778int complete_pio(struct kvm_vcpu *vcpu);
896bool kvm_check_iopl(struct kvm_vcpu *vcpu); 779bool kvm_check_iopl(struct kvm_vcpu *vcpu);
897 780
898static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
899{
900 return gpa;
901}
902
903static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) 781static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
904{ 782{
905 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 783 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -979,14 +857,15 @@ extern bool kvm_rebooting;
979 __ASM_SIZE(push) " $666b \n\t" \ 857 __ASM_SIZE(push) " $666b \n\t" \
980 "call kvm_spurious_fault \n\t" \ 858 "call kvm_spurious_fault \n\t" \
981 ".popsection \n\t" \ 859 ".popsection \n\t" \
982 _ASM_EXTABLE(666b, 667b) 860 ".pushsection __ex_table, \"a\" \n\t" \
861 _ASM_PTR " 666b, 667b \n\t" \
862 ".popsection"
983 863
984#define __kvm_handle_fault_on_reboot(insn) \ 864#define __kvm_handle_fault_on_reboot(insn) \
985 ____kvm_handle_fault_on_reboot(insn, "") 865 ____kvm_handle_fault_on_reboot(insn, "")
986 866
987#define KVM_ARCH_WANT_MMU_NOTIFIER 867#define KVM_ARCH_WANT_MMU_NOTIFIER
988int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 868int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
989int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
990int kvm_age_hva(struct kvm *kvm, unsigned long hva); 869int kvm_age_hva(struct kvm *kvm, unsigned long hva);
991int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 870int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
992void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 871void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -1011,17 +890,4 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1011 890
1012void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); 891void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1013 892
1014int kvm_is_in_guest(void);
1015
1016void kvm_pmu_init(struct kvm_vcpu *vcpu);
1017void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
1018void kvm_pmu_reset(struct kvm_vcpu *vcpu);
1019void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
1020bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
1021int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
1022int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
1023int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
1024void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
1025void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
1026
1027#endif /* _ASM_X86_KVM_HOST_H */ 893#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 5ed1f16187b..734c3767cfa 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,27 +1,107 @@
1#ifndef _ASM_X86_KVM_PARA_H 1#ifndef _ASM_X86_KVM_PARA_H
2#define _ASM_X86_KVM_PARA_H 2#define _ASM_X86_KVM_PARA_H
3 3
4#include <linux/types.h>
5#include <asm/hyperv.h>
6
7/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
8 * should be used to determine that a VM is running under KVM.
9 */
10#define KVM_CPUID_SIGNATURE 0x40000000
11
12/* This CPUID returns a feature bitmap in eax. Before enabling a particular
13 * paravirtualization, the appropriate feature bit should be checked.
14 */
15#define KVM_CPUID_FEATURES 0x40000001
16#define KVM_FEATURE_CLOCKSOURCE 0
17#define KVM_FEATURE_NOP_IO_DELAY 1
18#define KVM_FEATURE_MMU_OP 2
19/* This indicates that the new set of kvmclock msrs
20 * are available. The use of 0x11 and 0x12 is deprecated
21 */
22#define KVM_FEATURE_CLOCKSOURCE2 3
23#define KVM_FEATURE_ASYNC_PF 4
24#define KVM_FEATURE_STEAL_TIME 5
25
26/* The last 8 bits are used to indicate how to interpret the flags field
27 * in pvclock structure. If no bits are set, all flags are ignored.
28 */
29#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
30
31#define MSR_KVM_WALL_CLOCK 0x11
32#define MSR_KVM_SYSTEM_TIME 0x12
33
34#define KVM_MSR_ENABLED 1
35/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
36#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
37#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
38#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
39#define MSR_KVM_STEAL_TIME 0x4b564d03
40
41struct kvm_steal_time {
42 __u64 steal;
43 __u32 version;
44 __u32 flags;
45 __u32 pad[12];
46};
47
48#define KVM_STEAL_ALIGNMENT_BITS 5
49#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
50#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
51
52#define KVM_MAX_MMU_OP_BATCH 32
53
54#define KVM_ASYNC_PF_ENABLED (1 << 0)
55#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
56
57/* Operations for KVM_HC_MMU_OP */
58#define KVM_MMU_OP_WRITE_PTE 1
59#define KVM_MMU_OP_FLUSH_TLB 2
60#define KVM_MMU_OP_RELEASE_PT 3
61
62/* Payload for KVM_HC_MMU_OP */
63struct kvm_mmu_op_header {
64 __u32 op;
65 __u32 pad;
66};
67
68struct kvm_mmu_op_write_pte {
69 struct kvm_mmu_op_header header;
70 __u64 pte_phys;
71 __u64 pte_val;
72};
73
74struct kvm_mmu_op_flush_tlb {
75 struct kvm_mmu_op_header header;
76};
77
78struct kvm_mmu_op_release_pt {
79 struct kvm_mmu_op_header header;
80 __u64 pt_phys;
81};
82
83#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
84#define KVM_PV_REASON_PAGE_READY 2
85
86struct kvm_vcpu_pv_apf_data {
87 __u32 reason;
88 __u8 pad[60];
89 __u32 enabled;
90};
91
92#ifdef __KERNEL__
4#include <asm/processor.h> 93#include <asm/processor.h>
5#include <uapi/asm/kvm_para.h>
6 94
7extern void kvmclock_init(void); 95extern void kvmclock_init(void);
8extern int kvm_register_clock(char *txt); 96extern int kvm_register_clock(char *txt);
9 97
10#ifdef CONFIG_KVM_GUEST
11bool kvm_check_and_clear_guest_paused(void);
12#else
13static inline bool kvm_check_and_clear_guest_paused(void)
14{
15 return false;
16}
17#endif /* CONFIG_KVM_GUEST */
18 98
19/* This instruction is vmcall. On non-VT architectures, it will generate a 99/* This instruction is vmcall. On non-VT architectures, it will generate a
20 * trap that we will then rewrite to the appropriate instruction. 100 * trap that we will then rewrite to the appropriate instruction.
21 */ 101 */
22#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" 102#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
23 103
24/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall 104/* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun
25 * instruction. The hypervisor may replace it with something else but only the 105 * instruction. The hypervisor may replace it with something else but only the
26 * instructions are guaranteed to be supported. 106 * instructions are guaranteed to be supported.
27 * 107 *
@@ -90,19 +170,14 @@ static inline int kvm_para_available(void)
90 unsigned int eax, ebx, ecx, edx; 170 unsigned int eax, ebx, ecx, edx;
91 char signature[13]; 171 char signature[13];
92 172
93 if (boot_cpu_data.cpuid_level < 0) 173 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
94 return 0; /* So we don't blow up on old processors */ 174 memcpy(signature + 0, &ebx, 4);
95 175 memcpy(signature + 4, &ecx, 4);
96 if (cpu_has_hypervisor) { 176 memcpy(signature + 8, &edx, 4);
97 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 177 signature[12] = 0;
98 memcpy(signature + 0, &ebx, 4);
99 memcpy(signature + 4, &ecx, 4);
100 memcpy(signature + 8, &edx, 4);
101 signature[12] = 0;
102 178
103 if (strcmp(signature, "KVMKVMKVM") == 0) 179 if (strcmp(signature, "KVMKVMKVM") == 0)
104 return 1; 180 return 1;
105 }
106 181
107 return 0; 182 return 0;
108} 183}
@@ -133,4 +208,6 @@ static inline void kvm_disable_steal_time(void)
133} 208}
134#endif 209#endif
135 210
211#endif /* __KERNEL__ */
212
136#endif /* _ASM_X86_KVM_PARA_H */ 213#endif /* _ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 2d89e3980cb..9cdae5d47e8 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5 5
6#include <asm/system.h>
6#include <linux/atomic.h> 7#include <linux/atomic.h>
7#include <asm/asm.h> 8#include <asm/asm.h>
8 9
@@ -124,11 +125,27 @@ static inline int local_add_negative(long i, local_t *l)
124 */ 125 */
125static inline long local_add_return(long i, local_t *l) 126static inline long local_add_return(long i, local_t *l)
126{ 127{
127 long __i = i; 128 long __i;
129#ifdef CONFIG_M386
130 unsigned long flags;
131 if (unlikely(boot_cpu_data.x86 <= 3))
132 goto no_xadd;
133#endif
134 /* Modern 486+ processor */
135 __i = i;
128 asm volatile(_ASM_XADD "%0, %1;" 136 asm volatile(_ASM_XADD "%0, %1;"
129 : "+r" (i), "+m" (l->a.counter) 137 : "+r" (i), "+m" (l->a.counter)
130 : : "memory"); 138 : : "memory");
131 return i + __i; 139 return i + __i;
140
141#ifdef CONFIG_M386
142no_xadd: /* Legacy 386 processor */
143 local_irq_save(flags);
144 __i = local_read(l);
145 local_set(l, i + __i);
146 local_irq_restore(flags);
147 return i + __i;
148#endif
132} 149}
133 150
134static inline long local_sub_return(long i, local_t *l) 151static inline long local_sub_return(long i, local_t *l)
diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 88d0c3c74c1..853728519ae 100644
--- a/arch/x86/include/asm/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
@@ -15,7 +15,7 @@
15 15
16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ 16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
17#define CALIBRATE_LATCH \ 17#define CALIBRATE_LATCH \
18 ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) 18 ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
19 19
20static inline void mach_prepare_counter(void) 20static inline void mach_prepare_counter(void)
21{ 21{
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index a01e7ec7d23..72a8b52e7df 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -17,7 +17,7 @@
17#define NMI_REASON_CLEAR_IOCHK 0x08 17#define NMI_REASON_CLEAR_IOCHK 0x08
18#define NMI_REASON_CLEAR_MASK 0x0f 18#define NMI_REASON_CLEAR_MASK 0x0f
19 19
20static inline unsigned char default_get_nmi_reason(void) 20static inline unsigned char get_nmi_reason(void)
21{ 21{
22 return inb(NMI_REASON_PORT); 22 return inb(NMI_REASON_PORT);
23} 23}
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index d354fb781c5..01fdf5674e2 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -5,6 +5,7 @@
5#define _ASM_X86_MC146818RTC_H 5#define _ASM_X86_MC146818RTC_H
6 6
7#include <asm/io.h> 7#include <asm/io.h>
8#include <asm/system.h>
8#include <asm/processor.h> 9#include <asm/processor.h>
9#include <linux/mc146818rtc.h> 10#include <linux/mc146818rtc.h>
10 11
@@ -80,8 +81,8 @@ static inline unsigned char current_lock_cmos_reg(void)
80#else 81#else
81#define lock_cmos_prefix(reg) do {} while (0) 82#define lock_cmos_prefix(reg) do {} while (0)
82#define lock_cmos_suffix(reg) do {} while (0) 83#define lock_cmos_suffix(reg) do {} while (0)
83#define lock_cmos(reg) do { } while (0) 84#define lock_cmos(reg)
84#define unlock_cmos() do { } while (0) 85#define unlock_cmos()
85#define do_i_have_lock_cmos() 0 86#define do_i_have_lock_cmos() 0
86#define current_lock_cmos_reg() 0 87#define current_lock_cmos_reg() 0
87#endif 88#endif
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ecdfee60ee4..c9321f34e55 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -1,32 +1,132 @@
1#ifndef _ASM_X86_MCE_H 1#ifndef _ASM_X86_MCE_H
2#define _ASM_X86_MCE_H 2#define _ASM_X86_MCE_H
3 3
4#include <uapi/asm/mce.h> 4#include <linux/types.h>
5 5#include <asm/ioctls.h>
6 6
7struct mca_config { 7/*
8 bool dont_log_ce; 8 * Machine Check support for x86
9 bool cmci_disabled; 9 */
10 bool ignore_ce; 10
11 bool disabled; 11/* MCG_CAP register defines */
12 bool ser; 12#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
13 bool bios_cmci_threshold; 13#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
14 u8 banks; 14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
15 s8 bootlog; 15#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
16 int tolerant; 16#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
17 int monarch_timeout; 17#define MCG_EXT_CNT_SHIFT 16
18 int panic_timeout; 18#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
19 u32 rip_msr; 19#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
20
21/* MCG_STATUS register defines */
22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
25
26/* MCi_STATUS register defines */
27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
28#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
29#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
30#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
31#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
32#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
36
37/* MCi_MISC register defines */
38#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
39#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
40#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
41#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
42#define MCI_MISC_ADDR_PHYS 2 /* physical address */
43#define MCI_MISC_ADDR_MEM 3 /* memory address */
44#define MCI_MISC_ADDR_GENERIC 7 /* generic */
45
46/* CTL2 register defines */
47#define MCI_CTL2_CMCI_EN (1ULL << 30)
48#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
49
50#define MCJ_CTX_MASK 3
51#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
52#define MCJ_CTX_RANDOM 0 /* inject context: random */
53#define MCJ_CTX_PROCESS 1 /* inject context: process */
54#define MCJ_CTX_IRQ 2 /* inject context: IRQ */
55#define MCJ_NMI_BROADCAST 4 /* do NMI broadcasting */
56#define MCJ_EXCEPTION 8 /* raise as exception */
57
58/* Fields are zero when not available */
59struct mce {
60 __u64 status;
61 __u64 misc;
62 __u64 addr;
63 __u64 mcgstatus;
64 __u64 ip;
65 __u64 tsc; /* cpu time stamp counter */
66 __u64 time; /* wall time_t when error was detected */
67 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
68 __u8 inject_flags; /* software inject flags */
69 __u16 pad;
70 __u32 cpuid; /* CPUID 1 EAX */
71 __u8 cs; /* code segment */
72 __u8 bank; /* machine check bank */
73 __u8 cpu; /* cpu number; obsolete; use extcpu now */
74 __u8 finished; /* entry is valid */
75 __u32 extcpu; /* linux cpu number that detected the error */
76 __u32 socketid; /* CPU socket ID */
77 __u32 apicid; /* CPU initial apic ID */
78 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
79};
80
81/*
82 * This structure contains all data related to the MCE log. Also
83 * carries a signature to make it easier to find from external
84 * debugging tools. Each entry is only valid when its finished flag
85 * is set.
86 */
87
88#define MCE_LOG_LEN 32
89
90struct mce_log {
91 char signature[12]; /* "MACHINECHECK" */
92 unsigned len; /* = MCE_LOG_LEN */
93 unsigned next;
94 unsigned flags;
95 unsigned recordlen; /* length of struct mce */
96 struct mce entry[MCE_LOG_LEN];
20}; 97};
21 98
22extern struct mca_config mca_cfg; 99#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
23extern void mce_register_decode_chain(struct notifier_block *nb); 100
24extern void mce_unregister_decode_chain(struct notifier_block *nb); 101#define MCE_LOG_SIGNATURE "MACHINECHECK"
102
103#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
104#define MCE_GET_LOG_LEN _IOR('M', 2, int)
105#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
106
107/* Software defined banks */
108#define MCE_EXTENDED_BANK 128
109#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
110
111#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
112#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
113#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
114#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
115#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
116#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
117#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
118#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
119
120
121#ifdef __KERNEL__
122
123extern struct atomic_notifier_head x86_mce_decoder_chain;
25 124
26#include <linux/percpu.h> 125#include <linux/percpu.h>
27#include <linux/init.h> 126#include <linux/init.h>
28#include <linux/atomic.h> 127#include <linux/atomic.h>
29 128
129extern int mce_disabled;
30extern int mce_p5_enabled; 130extern int mce_p5_enabled;
31 131
32#ifdef CONFIG_X86_MCE 132#ifdef CONFIG_X86_MCE
@@ -49,7 +149,7 @@ static inline void enable_p5_mce(void) {}
49 149
50void mce_setup(struct mce *m); 150void mce_setup(struct mce *m);
51void mce_log(struct mce *m); 151void mce_log(struct mce *m);
52DECLARE_PER_CPU(struct device *, mce_device); 152DECLARE_PER_CPU(struct sys_device, mce_sysdev);
53 153
54/* 154/*
55 * Maximum banks number. 155 * Maximum banks number.
@@ -59,6 +159,8 @@ DECLARE_PER_CPU(struct device *, mce_device);
59#define MAX_NR_BANKS 32 159#define MAX_NR_BANKS 32
60 160
61#ifdef CONFIG_X86_MCE_INTEL 161#ifdef CONFIG_X86_MCE_INTEL
162extern int mce_cmci_disabled;
163extern int mce_ignore_ce;
62void mce_intel_feature_init(struct cpuinfo_x86 *c); 164void mce_intel_feature_init(struct cpuinfo_x86 *c);
63void cmci_clear(void); 165void cmci_clear(void);
64void cmci_reenable(void); 166void cmci_reenable(void);
@@ -99,10 +201,7 @@ int mce_notify_irq(void);
99void mce_notify_process(void); 201void mce_notify_process(void);
100 202
101DECLARE_PER_CPU(struct mce, injectm); 203DECLARE_PER_CPU(struct mce, injectm);
102 204extern struct file_operations mce_chrdev_ops;
103extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
104 const char __user *ubuf,
105 size_t usize, loff_t *off));
106 205
107/* 206/*
108 * Exception handler 207 * Exception handler
@@ -144,4 +243,5 @@ struct cper_sec_mem_err;
144extern void apei_mce_report_mem_error(int corrected, 243extern void apei_mce_report_mem_error(int corrected,
145 struct cper_sec_mem_err *mem_err); 244 struct cper_sec_mem_err *mem_err);
146 245
246#endif /* __KERNEL__ */
147#endif /* _ASM_X86_MCE_H */ 247#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 43d921b4752..24215072d0e 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -15,8 +15,8 @@ struct microcode_ops {
15 enum ucode_state (*request_microcode_user) (int cpu, 15 enum ucode_state (*request_microcode_user) (int cpu,
16 const void __user *buf, size_t size); 16 const void __user *buf, size_t size);
17 17
18 enum ucode_state (*request_microcode_fw) (int cpu, struct device *, 18 enum ucode_state (*request_microcode_fw) (int cpu,
19 bool refresh_fw); 19 struct device *device);
20 20
21 void (*microcode_fini_cpu) (int cpu); 21 void (*microcode_fini_cpu) (int cpu);
22 22
@@ -48,13 +48,17 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
48 48
49#ifdef CONFIG_MICROCODE_AMD 49#ifdef CONFIG_MICROCODE_AMD
50extern struct microcode_ops * __init init_amd_microcode(void); 50extern struct microcode_ops * __init init_amd_microcode(void);
51extern void __exit exit_amd_microcode(void); 51
52static inline void get_ucode_data(void *to, const u8 *from, size_t n)
53{
54 memcpy(to, from, n);
55}
56
52#else 57#else
53static inline struct microcode_ops * __init init_amd_microcode(void) 58static inline struct microcode_ops * __init init_amd_microcode(void)
54{ 59{
55 return NULL; 60 return NULL;
56} 61}
57static inline void __exit exit_amd_microcode(void) {}
58#endif 62#endif
59 63
60#endif /* _ASM_X86_MICROCODE_H */ 64#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index cdbf3677610..69021528b43 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
26{ 26{
27#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
28 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 28 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
29 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 29 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
30#endif 30#endif
31} 31}
32 32
@@ -37,8 +37,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
37 37
38 if (likely(prev != next)) { 38 if (likely(prev != next)) {
39#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
40 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); 40 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
41 this_cpu_write(cpu_tlbstate.active_mm, next); 41 percpu_write(cpu_tlbstate.active_mm, next);
42#endif 42#endif
43 cpumask_set_cpu(cpu, mm_cpumask(next)); 43 cpumask_set_cpu(cpu, mm_cpumask(next));
44 44
@@ -56,8 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
56 } 56 }
57#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
58 else { 58 else {
59 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); 59 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
60 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); 60 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
61 61
62 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { 62 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
63 /* We were in lazy tlb mode and leave_mm disabled 63 /* We were in lazy tlb mode and leave_mm disabled
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
index d497bc425ca..64217ea16a3 100644
--- a/arch/x86/include/asm/mmzone.h
+++ b/arch/x86/include/asm/mmzone.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/mmzone_32.h> 2# include "mmzone_32.h"
3#else 3#else
4# include <asm/mmzone_64.h> 4# include "mmzone_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index eb05fb3b02f..55728e12147 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -61,4 +61,10 @@ static inline int pfn_valid(int pfn)
61 61
62#endif /* CONFIG_DISCONTIGMEM */ 62#endif /* CONFIG_DISCONTIGMEM */
63 63
64#ifdef CONFIG_NEED_MULTIPLE_NODES
65/* always use node 0 for bootmem on this numa platform */
66#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
67 (NODE_DATA(0)->bdata)
68#endif /* CONFIG_NEED_MULTIPLE_NODES */
69
64#endif /* _ASM_X86_MMZONE_32_H */ 70#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index e3b7819caee..9eae7752ae9 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,6 +5,8 @@
5 5
6#ifdef CONFIG_X86_64 6#ifdef CONFIG_X86_64
7/* X86_64 does not define MODULE_PROC_FAMILY */ 7/* X86_64 does not define MODULE_PROC_FAMILY */
8#elif defined CONFIG_M386
9#define MODULE_PROC_FAMILY "386 "
8#elif defined CONFIG_M486 10#elif defined CONFIG_M486
9#define MODULE_PROC_FAMILY "486 " 11#define MODULE_PROC_FAMILY "486 "
10#elif defined CONFIG_M586 12#elif defined CONFIG_M586
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3e2f42a4b87..9c7d95f6174 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -40,7 +40,7 @@ extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
40 40
41#endif /* CONFIG_X86_64 */ 41#endif /* CONFIG_X86_64 */
42 42
43#ifdef CONFIG_EISA 43#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
44extern int mp_bus_id_to_type[MAX_MP_BUSSES]; 44extern int mp_bus_id_to_type[MAX_MP_BUSSES];
45#endif 45#endif
46 46
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index b31f8c09827..c0a955a9a08 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -84,7 +84,7 @@ struct mpc_bus {
84#define BUSTYPE_EISA "EISA" 84#define BUSTYPE_EISA "EISA"
85#define BUSTYPE_ISA "ISA" 85#define BUSTYPE_ISA "ISA"
86#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ 86#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
87#define BUSTYPE_MCA "MCA" /* Obsolete */ 87#define BUSTYPE_MCA "MCA"
88#define BUSTYPE_VL "VL" /* Local bus */ 88#define BUSTYPE_VL "VL" /* Local bus */
89#define BUSTYPE_PCI "PCI" 89#define BUSTYPE_PCI "PCI"
90#define BUSTYPE_PCMCIA "PCMCIA" 90#define BUSTYPE_PCMCIA "PCMCIA"
@@ -169,5 +169,6 @@ enum mp_bustype {
169 MP_BUS_ISA = 1, 169 MP_BUS_ISA = 1,
170 MP_BUS_EISA, 170 MP_BUS_EISA,
171 MP_BUS_PCI, 171 MP_BUS_PCI,
172 MP_BUS_MCA,
172}; 173};
173#endif /* _ASM_X86_MPSPEC_DEF_H */ 174#endif /* _ASM_X86_MPSPEC_DEF_H */
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index fc18bf3ce7c..719f00b28ff 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -26,25 +26,16 @@ extern struct sfi_rtc_table_entry sfi_mrtc_array[];
26 * identified via MSRs. 26 * identified via MSRs.
27 */ 27 */
28enum mrst_cpu_type { 28enum mrst_cpu_type {
29 /* 1 was Moorestown */ 29 MRST_CPU_CHIP_LINCROFT = 1,
30 MRST_CPU_CHIP_PENWELL = 2, 30 MRST_CPU_CHIP_PENWELL,
31}; 31};
32 32
33extern enum mrst_cpu_type __mrst_cpu_chip; 33extern enum mrst_cpu_type __mrst_cpu_chip;
34
35#ifdef CONFIG_X86_INTEL_MID
36
37static inline enum mrst_cpu_type mrst_identify_cpu(void) 34static inline enum mrst_cpu_type mrst_identify_cpu(void)
38{ 35{
39 return __mrst_cpu_chip; 36 return __mrst_cpu_chip;
40} 37}
41 38
42#else /* !CONFIG_X86_INTEL_MID */
43
44#define mrst_identify_cpu() (0)
45
46#endif /* !CONFIG_X86_INTEL_MID */
47
48enum mrst_timer_options { 39enum mrst_timer_options {
49 MRST_TIMER_DEFAULT, 40 MRST_TIMER_DEFAULT,
50 MRST_TIMER_APBT_ONLY, 41 MRST_TIMER_APBT_ONLY,
@@ -53,13 +44,6 @@ enum mrst_timer_options {
53 44
54extern enum mrst_timer_options mrst_timer_options; 45extern enum mrst_timer_options mrst_timer_options;
55 46
56/*
57 * Penwell uses spread spectrum clock, so the freq number is not exactly
58 * the same as reported by MSR based on SDM.
59 */
60#define PENWELL_FSB_FREQ_83SKU 83200
61#define PENWELL_FSB_FREQ_100SKU 99840
62
63#define SFI_MTMR_MAX_NUM 8 47#define SFI_MTMR_MAX_NUM 8
64#define SFI_MRTC_MAX 8 48#define SFI_MRTC_MAX 8
65 49
@@ -67,7 +51,7 @@ extern struct console early_mrst_console;
67extern void mrst_early_console_init(void); 51extern void mrst_early_console_init(void);
68 52
69extern struct console early_hsu_console; 53extern struct console early_hsu_console;
70extern void hsu_early_console_init(const char *); 54extern void hsu_early_console_init(void);
71 55
72extern void intel_scu_devices_create(void); 56extern void intel_scu_devices_create(void);
73extern void intel_scu_devices_destroy(void); 57extern void intel_scu_devices_destroy(void);
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e282..084ef95274c 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -1,10 +1,18 @@
1#ifndef _ASM_X86_MSR_H 1#ifndef _ASM_X86_MSR_H
2#define _ASM_X86_MSR_H 2#define _ASM_X86_MSR_H
3 3
4#include <uapi/asm/msr.h> 4#include <asm/msr-index.h>
5 5
6#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
7 7
8#include <linux/types.h>
9#include <linux/ioctl.h>
10
11#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
12#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
13
14#ifdef __KERNEL__
15
8#include <asm/asm.h> 16#include <asm/asm.h>
9#include <asm/errno.h> 17#include <asm/errno.h>
10#include <asm/cpumask.h> 18#include <asm/cpumask.h>
@@ -107,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
107 115
108extern unsigned long long native_read_tsc(void); 116extern unsigned long long native_read_tsc(void);
109 117
110extern int rdmsr_safe_regs(u32 regs[8]); 118extern int native_rdmsr_safe_regs(u32 regs[8]);
111extern int wrmsr_safe_regs(u32 regs[8]); 119extern int native_wrmsr_safe_regs(u32 regs[8]);
112 120
113static __always_inline unsigned long long __native_read_tsc(void) 121static __always_inline unsigned long long __native_read_tsc(void)
114{ 122{
@@ -179,6 +187,43 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
179 return err; 187 return err;
180} 188}
181 189
190static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
191{
192 u32 gprs[8] = { 0 };
193 int err;
194
195 gprs[1] = msr;
196 gprs[7] = 0x9c5a203a;
197
198 err = native_rdmsr_safe_regs(gprs);
199
200 *p = gprs[0] | ((u64)gprs[2] << 32);
201
202 return err;
203}
204
205static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
206{
207 u32 gprs[8] = { 0 };
208
209 gprs[0] = (u32)val;
210 gprs[1] = msr;
211 gprs[2] = val >> 32;
212 gprs[7] = 0x9c5a203a;
213
214 return native_wrmsr_safe_regs(gprs);
215}
216
217static inline int rdmsr_safe_regs(u32 regs[8])
218{
219 return native_rdmsr_safe_regs(regs);
220}
221
222static inline int wrmsr_safe_regs(u32 regs[8])
223{
224 return native_wrmsr_safe_regs(regs);
225}
226
182#define rdtscl(low) \ 227#define rdtscl(low) \
183 ((low) = (u32)__native_read_tsc()) 228 ((low) = (u32)__native_read_tsc())
184 229
@@ -192,8 +237,6 @@ do { \
192 (high) = (u32)(_l >> 32); \ 237 (high) = (u32)(_l >> 32); \
193} while (0) 238} while (0)
194 239
195#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
196
197#define rdtscp(low, high, aux) \ 240#define rdtscp(low, high, aux) \
198do { \ 241do { \
199 unsigned long long _val = native_read_tscp(&(aux)); \ 242 unsigned long long _val = native_read_tscp(&(aux)); \
@@ -205,7 +248,8 @@ do { \
205 248
206#endif /* !CONFIG_PARAVIRT */ 249#endif /* !CONFIG_PARAVIRT */
207 250
208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ 251
252#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
209 (u32)((val) >> 32)) 253 (u32)((val) >> 32))
210 254
211#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) 255#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
@@ -263,5 +307,6 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
263 return wrmsr_safe_regs(regs); 307 return wrmsr_safe_regs(regs);
264} 308}
265#endif /* CONFIG_SMP */ 309#endif /* CONFIG_SMP */
310#endif /* __KERNEL__ */
266#endif /* __ASSEMBLY__ */ 311#endif /* __ASSEMBLY__ */
267#endif /* _ASM_X86_MSR_H */ 312#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index e235582f993..4365ffdb461 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,8 +23,89 @@
23#ifndef _ASM_X86_MTRR_H 23#ifndef _ASM_X86_MTRR_H
24#define _ASM_X86_MTRR_H 24#define _ASM_X86_MTRR_H
25 25
26#include <uapi/asm/mtrr.h> 26#include <linux/types.h>
27#include <linux/ioctl.h>
28#include <linux/errno.h>
27 29
30#define MTRR_IOCTL_BASE 'M'
31
32struct mtrr_sentry {
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38/* Warning: this structure has a different order from i386
39 on x86-64. The 32bit emulation code takes care of that.
40 But you need to use this for 64bit, otherwise your X server
41 will break. */
42
43#ifdef __i386__
44struct mtrr_gentry {
45 unsigned int regnum; /* Register number */
46 unsigned long base; /* Base address */
47 unsigned int size; /* Size of region */
48 unsigned int type; /* Type of region */
49};
50
51#else /* __i386__ */
52
53struct mtrr_gentry {
54 unsigned long base; /* Base address */
55 unsigned int size; /* Size of region */
56 unsigned int regnum; /* Register number */
57 unsigned int type; /* Type of region */
58};
59#endif /* !__i386__ */
60
61struct mtrr_var_range {
62 __u32 base_lo;
63 __u32 base_hi;
64 __u32 mask_lo;
65 __u32 mask_hi;
66};
67
68/* In the Intel processor's MTRR interface, the MTRR type is always held in
69 an 8 bit field: */
70typedef __u8 mtrr_type;
71
72#define MTRR_NUM_FIXED_RANGES 88
73#define MTRR_MAX_VAR_RANGES 256
74
75struct mtrr_state_type {
76 struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
77 mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
78 unsigned char enabled;
79 unsigned char have_fixed;
80 mtrr_type def_type;
81};
82
83#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
84#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
85
86/* These are the various ioctls */
87#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
88#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
89#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
90#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
91#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
92#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
93#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
94#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
95#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
96#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
97
98/* These are the region types */
99#define MTRR_TYPE_UNCACHABLE 0
100#define MTRR_TYPE_WRCOMB 1
101/*#define MTRR_TYPE_ 2*/
102/*#define MTRR_TYPE_ 3*/
103#define MTRR_TYPE_WRTHROUGH 4
104#define MTRR_TYPE_WRPROT 5
105#define MTRR_TYPE_WRBACK 6
106#define MTRR_NUM_TYPES 7
107
108#ifdef __KERNEL__
28 109
29/* The following functions are for use by other drivers */ 110/* The following functions are for use by other drivers */
30# ifdef CONFIG_MTRR 111# ifdef CONFIG_MTRR
@@ -119,4 +200,6 @@ struct mtrr_gentry32 {
119 _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) 200 _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
120#endif /* CONFIG_COMPAT */ 201#endif /* CONFIG_COMPAT */
121 202
203#endif /* __KERNEL__ */
204
122#endif /* _ASM_X86_MTRR_H */ 205#endif /* _ASM_X86_MTRR_H */
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
index 7d3a4827539..a731b9c573a 100644
--- a/arch/x86/include/asm/mutex.h
+++ b/arch/x86/include/asm/mutex.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/mutex_32.h> 2# include "mutex_32.h"
3#else 3#else
4# include <asm/mutex_64.h> 4# include "mutex_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c0fa356e90d..4886a68f267 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -22,44 +22,27 @@ void arch_trigger_all_cpu_backtrace(void);
22#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 22#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
23#endif 23#endif
24 24
25#define NMI_FLAG_FIRST 1 25/*
26 26 * Define some priorities for the nmi notifier call chain.
27enum { 27 *
28 NMI_LOCAL=0, 28 * Create a local nmi bit that has a higher priority than
29 NMI_UNKNOWN, 29 * external nmis, because the local ones are more frequent.
30 NMI_SERR, 30 *
31 NMI_IO_CHECK, 31 * Also setup some default high/normal/low settings for
32 NMI_MAX 32 * subsystems to registers with. Using 4 bits to separate
33}; 33 * the priorities. This can go a lot higher if needed be.
34 34 */
35#define NMI_DONE 0 35
36#define NMI_HANDLED 1 36#define NMI_LOCAL_SHIFT 16 /* randomly picked */
37 37#define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT)
38typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); 38#define NMI_HIGH_PRIOR (1ULL << 8)
39 39#define NMI_NORMAL_PRIOR (1ULL << 4)
40struct nmiaction { 40#define NMI_LOW_PRIOR (1ULL << 0)
41 struct list_head list; 41#define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
42 nmi_handler_t handler; 42#define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
43 unsigned long flags; 43#define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
44 const char *name;
45};
46
47#define register_nmi_handler(t, fn, fg, n, init...) \
48({ \
49 static struct nmiaction init fn##_na = { \
50 .handler = (fn), \
51 .name = (n), \
52 .flags = (fg), \
53 }; \
54 __register_nmi_handler((t), &fn##_na); \
55})
56
57int __register_nmi_handler(unsigned int, struct nmiaction *);
58
59void unregister_nmi_handler(unsigned int, const char *);
60 44
61void stop_nmi(void); 45void stop_nmi(void);
62void restart_nmi(void); 46void restart_nmi(void);
63void local_touch_nmi(void);
64 47
65#endif /* _ASM_X86_NMI_H */ 48#endif /* _ASM_X86_NMI_H */
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index aff2b335610..405b4032a60 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -87,11 +87,7 @@
87#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 87#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
88#define P6_NOP5_ATOMIC P6_NOP5 88#define P6_NOP5_ATOMIC P6_NOP5
89 89
90#ifdef __ASSEMBLY__
91#define _ASM_MK_NOP(x) .byte x
92#else
93#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" 90#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
94#endif
95 91
96#if defined(CONFIG_MK7) 92#if defined(CONFIG_MK7)
97#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) 93#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 49119fcea2d..bfacd2ccf65 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -53,9 +53,9 @@ static inline int numa_cpu_node(int cpu)
53#endif /* CONFIG_NUMA */ 53#endif /* CONFIG_NUMA */
54 54
55#ifdef CONFIG_X86_32 55#ifdef CONFIG_X86_32
56# include <asm/numa_32.h> 56# include "numa_32.h"
57#else 57#else
58# include <asm/numa_64.h> 58# include "numa_64.h"
59#endif 59#endif
60 60
61#ifdef CONFIG_NUMA 61#ifdef CONFIG_NUMA
diff --git a/arch/x86/include/asm/numachip/numachip.h b/arch/x86/include/asm/numachip/numachip.h
deleted file mode 100644
index 1c6f7f6212c..00000000000
--- a/arch/x86/include/asm/numachip/numachip.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Numascale NumaConnect-specific header file
7 *
8 * Copyright (C) 2012 Numascale AS. All rights reserved.
9 *
10 * Send feedback to <support@numascale.com>
11 *
12 */
13
14#ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
15#define _ASM_X86_NUMACHIP_NUMACHIP_H
16
17extern int __init pci_numachip_init(void);
18
19#endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
diff --git a/arch/x86/include/asm/numachip/numachip_csr.h b/arch/x86/include/asm/numachip/numachip_csr.h
deleted file mode 100644
index 660f843df92..00000000000
--- a/arch/x86/include/asm/numachip/numachip_csr.h
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Numascale NumaConnect-Specific Header file
7 *
8 * Copyright (C) 2011 Numascale AS. All rights reserved.
9 *
10 * Send feedback to <support@numascale.com>
11 *
12 */
13
14#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
15#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
16
17#include <linux/numa.h>
18#include <linux/percpu.h>
19#include <linux/io.h>
20#include <linux/swab.h>
21#include <asm/types.h>
22#include <asm/processor.h>
23
24#define CSR_NODE_SHIFT 16
25#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT)
26#define CSR_NODE_MASK 0x0fff /* 4K nodes */
27
28/* 32K CSR space, b15 indicates geo/non-geo */
29#define CSR_OFFSET_MASK 0x7fffUL
30
31/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */
32#define NUMACHIP_GCSR_BASE 0x3fff00000000ULL
33#define NUMACHIP_GCSR_LIM 0x3fff0fffffffULL
34#define NUMACHIP_GCSR_SIZE (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
35
36/*
37 * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
38 * when using the direct mapping on x86_64, both start and size needs to be
39 * aligned with PMD_SIZE which is 2M
40 */
41#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL
42#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL
43#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
44
45static inline void *gcsr_address(int node, unsigned long offset)
46{
47 return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
48 CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
49}
50
51static inline void *lcsr_address(unsigned long offset)
52{
53 return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |
54 CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
55}
56
57static inline unsigned int read_gcsr(int node, unsigned long offset)
58{
59 return swab32(readl(gcsr_address(node, offset)));
60}
61
62static inline void write_gcsr(int node, unsigned long offset, unsigned int val)
63{
64 writel(swab32(val), gcsr_address(node, offset));
65}
66
67static inline unsigned int read_lcsr(unsigned long offset)
68{
69 return swab32(readl(lcsr_address(offset)));
70}
71
72static inline void write_lcsr(unsigned long offset, unsigned int val)
73{
74 writel(swab32(val), lcsr_address(offset));
75}
76
77/* ========================================================================= */
78/* CSR_G0_STATE_CLEAR */
79/* ========================================================================= */
80
81#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
82union numachip_csr_g0_state_clear {
83 unsigned int v;
84 struct numachip_csr_g0_state_clear_s {
85 unsigned int _state:2;
86 unsigned int _rsvd_2_6:5;
87 unsigned int _lost:1;
88 unsigned int _rsvd_8_31:24;
89 } s;
90};
91
92/* ========================================================================= */
93/* CSR_G0_NODE_IDS */
94/* ========================================================================= */
95
96#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
97union numachip_csr_g0_node_ids {
98 unsigned int v;
99 struct numachip_csr_g0_node_ids_s {
100 unsigned int _initialid:16;
101 unsigned int _nodeid:12;
102 unsigned int _rsvd_28_31:4;
103 } s;
104};
105
106/* ========================================================================= */
107/* CSR_G3_EXT_IRQ_GEN */
108/* ========================================================================= */
109
110#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
111union numachip_csr_g3_ext_irq_gen {
112 unsigned int v;
113 struct numachip_csr_g3_ext_irq_gen_s {
114 unsigned int _vector:8;
115 unsigned int _msgtype:3;
116 unsigned int _index:5;
117 unsigned int _destination_apic_id:16;
118 } s;
119};
120
121/* ========================================================================= */
122/* CSR_G3_EXT_IRQ_STATUS */
123/* ========================================================================= */
124
125#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
126union numachip_csr_g3_ext_irq_status {
127 unsigned int v;
128 struct numachip_csr_g3_ext_irq_status_s {
129 unsigned int _result:32;
130 } s;
131};
132
133/* ========================================================================= */
134/* CSR_G3_EXT_IRQ_DEST */
135/* ========================================================================= */
136
137#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
138union numachip_csr_g3_ext_irq_dest {
139 unsigned int v;
140 struct numachip_csr_g3_ext_irq_dest_s {
141 unsigned int _irq:8;
142 unsigned int _rsvd_8_31:24;
143 } s;
144};
145
146/* ========================================================================= */
147/* CSR_G3_NC_ATT_MAP_SELECT */
148/* ========================================================================= */
149
150#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
151union numachip_csr_g3_nc_att_map_select {
152 unsigned int v;
153 struct numachip_csr_g3_nc_att_map_select_s {
154 unsigned int _upper_address_bits:4;
155 unsigned int _select_ram:4;
156 unsigned int _rsvd_8_31:24;
157 } s;
158};
159
160/* ========================================================================= */
161/* CSR_G3_NC_ATT_MAP_SELECT_0-255 */
162/* ========================================================================= */
163
164#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
165
166#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
167
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 72f9adf6eca..87bdbca72f9 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -100,6 +100,25 @@ extern void olpc_xo1_pm_wakeup_clear(u16 value);
100 100
101extern int pci_olpc_init(void); 101extern int pci_olpc_init(void);
102 102
103/* EC related functions */
104
105extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
106 unsigned char *outbuf, size_t outlen);
107
108/* EC commands */
109
110#define EC_FIRMWARE_REV 0x08
111#define EC_WRITE_SCI_MASK 0x1b
112#define EC_WAKE_UP_WLAN 0x24
113#define EC_WLAN_LEAVE_RESET 0x25
114#define EC_READ_EB_MODE 0x2a
115#define EC_SET_SCI_INHIBIT 0x32
116#define EC_SET_SCI_INHIBIT_RELEASE 0x34
117#define EC_WLAN_ENTER_RESET 0x35
118#define EC_WRITE_EXT_SCI_MASK 0x38
119#define EC_SCI_QUERY 0x84
120#define EC_EXT_SCI_QUERY 0x85
121
103/* SCI source values */ 122/* SCI source values */
104 123
105#define EC_SCI_SRC_EMPTY 0x00 124#define EC_SCI_SRC_EMPTY 0x00
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ef17af01347..ade619ff9e2 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,8 +15,8 @@
15 */ 15 */
16#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 16#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
17 17
18#define THREAD_SIZE_ORDER 1 18#define THREAD_ORDER 1
19#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 19#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
20 20
21#define STACKFAULT_STACK 0 21#define STACKFAULT_STACK 0
22#define DOUBLEFAULT_STACK 1 22#define DOUBLEFAULT_STACK 1
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 320f7bb95f7..7639dbf5d22 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H 1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H 2#define _ASM_X86_PAGE_64_DEFS_H
3 3
4#define THREAD_SIZE_ORDER 1 4#define THREAD_ORDER 1
5#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 5#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
6#define CURRENT_MASK (~(THREAD_SIZE - 1)) 6#define CURRENT_MASK (~(THREAD_SIZE - 1))
7 7
8#define EXCEPTION_STACK_ORDER 0 8#define EXCEPTION_STACK_ORDER 0
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index e21fdd10479..bce688d54c1 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -55,6 +55,7 @@ extern unsigned long init_memory_mapping(unsigned long start,
55 unsigned long end); 55 unsigned long end);
56 56
57extern void initmem_init(void); 57extern void initmem_init(void);
58extern void free_initmem(void);
58 59
59#endif /* !__ASSEMBLY__ */ 60#endif /* !__ASSEMBLY__ */
60 61
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd..a7d2db9a74f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -10,7 +10,6 @@
10#include <asm/paravirt_types.h> 10#include <asm/paravirt_types.h>
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#include <linux/bug.h>
14#include <linux/types.h> 13#include <linux/types.h>
15#include <linux/cpumask.h> 14#include <linux/cpumask.h>
16 15
@@ -128,11 +127,21 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
128 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); 127 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
129} 128}
130 129
130static inline int paravirt_rdmsr_regs(u32 *regs)
131{
132 return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
133}
134
131static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) 135static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
132{ 136{
133 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); 137 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
134} 138}
135 139
140static inline int paravirt_wrmsr_regs(u32 *regs)
141{
142 return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
143}
144
136/* These should all do BUG_ON(_err), but our headers are too tangled. */ 145/* These should all do BUG_ON(_err), but our headers are too tangled. */
137#define rdmsr(msr, val1, val2) \ 146#define rdmsr(msr, val1, val2) \
138do { \ 147do { \
@@ -166,6 +175,9 @@ do { \
166 _err; \ 175 _err; \
167}) 176})
168 177
178#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
179#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
180
169static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) 181static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
170{ 182{
171 int err; 183 int err;
@@ -173,6 +185,32 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
173 *p = paravirt_read_msr(msr, &err); 185 *p = paravirt_read_msr(msr, &err);
174 return err; 186 return err;
175} 187}
188static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
189{
190 u32 gprs[8] = { 0 };
191 int err;
192
193 gprs[1] = msr;
194 gprs[7] = 0x9c5a203a;
195
196 err = paravirt_rdmsr_regs(gprs);
197
198 *p = gprs[0] | ((u64)gprs[2] << 32);
199
200 return err;
201}
202
203static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
204{
205 u32 gprs[8] = { 0 };
206
207 gprs[0] = (u32)val;
208 gprs[1] = msr;
209 gprs[2] = val >> 32;
210 gprs[7] = 0x9c5a203a;
211
212 return paravirt_wrmsr_regs(gprs);
213}
176 214
177static inline u64 paravirt_read_tsc(void) 215static inline u64 paravirt_read_tsc(void)
178{ 216{
@@ -192,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void)
192 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); 230 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
193} 231}
194 232
195struct static_key; 233struct jump_label_key;
196extern struct static_key paravirt_steal_enabled; 234extern struct jump_label_key paravirt_steal_enabled;
197extern struct static_key paravirt_steal_rq_enabled; 235extern struct jump_label_key paravirt_steal_rq_enabled;
198 236
199static inline u64 paravirt_steal_clock(int cpu) 237static inline u64 paravirt_steal_clock(int cpu)
200{ 238{
@@ -213,8 +251,6 @@ do { \
213 high = _l >> 32; \ 251 high = _l >> 32; \
214} while (0) 252} while (0)
215 253
216#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
217
218static inline unsigned long long paravirt_rdtscp(unsigned int *aux) 254static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
219{ 255{
220 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); 256 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
@@ -360,10 +396,9 @@ static inline void __flush_tlb_single(unsigned long addr)
360 396
361static inline void flush_tlb_others(const struct cpumask *cpumask, 397static inline void flush_tlb_others(const struct cpumask *cpumask,
362 struct mm_struct *mm, 398 struct mm_struct *mm,
363 unsigned long start, 399 unsigned long va)
364 unsigned long end)
365{ 400{
366 PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end); 401 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
367} 402}
368 403
369static inline int paravirt_pgd_alloc(struct mm_struct *mm) 404static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -528,6 +563,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
528 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); 563 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
529} 564}
530 565
566#ifdef CONFIG_TRANSPARENT_HUGEPAGE
531static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 567static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
532 pmd_t *pmdp, pmd_t pmd) 568 pmd_t *pmdp, pmd_t pmd)
533{ 569{
@@ -538,6 +574,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
538 PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, 574 PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
539 native_pmd_val(pmd)); 575 native_pmd_val(pmd));
540} 576}
577#endif
541 578
542static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 579static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
543{ 580{
@@ -985,8 +1022,10 @@ extern void default_banner(void);
985 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ 1022 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
986 ) 1023 )
987 1024
988#define GET_CR2_INTO_RAX \ 1025#define GET_CR2_INTO_RCX \
989 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) 1026 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1027 movq %rax, %rcx; \
1028 xorq %rax, %rax;
990 1029
991#define PARAVIRT_ADJUST_EXCEPTION_FRAME \ 1030#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
992 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ 1031 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83a..8e8b9a4987e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,7 +153,9 @@ struct pv_cpu_ops {
153 /* MSR, PMC and TSR operations. 153 /* MSR, PMC and TSR operations.
154 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ 154 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
155 u64 (*read_msr)(unsigned int msr, int *err); 155 u64 (*read_msr)(unsigned int msr, int *err);
156 int (*rdmsr_regs)(u32 *regs);
156 int (*write_msr)(unsigned int msr, unsigned low, unsigned high); 157 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
158 int (*wrmsr_regs)(u32 *regs);
157 159
158 u64 (*read_tsc)(void); 160 u64 (*read_tsc)(void);
159 u64 (*read_pmc)(int counter); 161 u64 (*read_pmc)(int counter);
@@ -248,8 +250,7 @@ struct pv_mmu_ops {
248 void (*flush_tlb_single)(unsigned long addr); 250 void (*flush_tlb_single)(unsigned long addr);
249 void (*flush_tlb_others)(const struct cpumask *cpus, 251 void (*flush_tlb_others)(const struct cpumask *cpus,
250 struct mm_struct *mm, 252 struct mm_struct *mm,
251 unsigned long start, 253 unsigned long va);
252 unsigned long end);
253 254
254 /* Hooks for allocating and freeing a pagetable top-level */ 255 /* Hooks for allocating and freeing a pagetable top-level */
255 int (*pgd_alloc)(struct mm_struct *mm); 256 int (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/parport.h b/arch/x86/include/asm/parport.h
index 0d2d3b29118..3c4ffeb467e 100644
--- a/arch/x86/include/asm/parport.h
+++ b/arch/x86/include/asm/parport.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_PARPORT_H 1#ifndef _ASM_X86_PARPORT_H
2#define _ASM_X86_PARPORT_H 2#define _ASM_X86_PARPORT_H
3 3
4static int parport_pc_find_isa_ports(int autoirq, int autodma); 4static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
5static int parport_pc_find_nonpci_ports(int autoirq, int autodma) 5static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
6{ 6{
7 return parport_pc_find_isa_ports(autoirq, autodma); 7 return parport_pc_find_isa_ports(autoirq, autodma);
8} 8}
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index dba7805176b..d498943b906 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -112,28 +112,19 @@ static inline void x86_teardown_msi_irq(unsigned int irq)
112{ 112{
113 x86_msi.teardown_msi_irq(irq); 113 x86_msi.teardown_msi_irq(irq);
114} 114}
115static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
116{
117 x86_msi.restore_msi_irqs(dev, irq);
118}
119#define arch_setup_msi_irqs x86_setup_msi_irqs 115#define arch_setup_msi_irqs x86_setup_msi_irqs
120#define arch_teardown_msi_irqs x86_teardown_msi_irqs 116#define arch_teardown_msi_irqs x86_teardown_msi_irqs
121#define arch_teardown_msi_irq x86_teardown_msi_irq 117#define arch_teardown_msi_irq x86_teardown_msi_irq
122#define arch_restore_msi_irqs x86_restore_msi_irqs
123/* implemented in arch/x86/kernel/apic/io_apic. */ 118/* implemented in arch/x86/kernel/apic/io_apic. */
124int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 119int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
125void native_teardown_msi_irq(unsigned int irq); 120void native_teardown_msi_irq(unsigned int irq);
126void native_restore_msi_irqs(struct pci_dev *dev, int irq);
127/* default to the implementation in drivers/lib/msi.c */ 121/* default to the implementation in drivers/lib/msi.c */
128#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS 122#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
129#define HAVE_DEFAULT_MSI_RESTORE_IRQS
130void default_teardown_msi_irqs(struct pci_dev *dev); 123void default_teardown_msi_irqs(struct pci_dev *dev);
131void default_restore_msi_irqs(struct pci_dev *dev, int irq);
132#else 124#else
133#define native_setup_msi_irqs NULL 125#define native_setup_msi_irqs NULL
134#define native_teardown_msi_irq NULL 126#define native_teardown_msi_irq NULL
135#define default_teardown_msi_irqs NULL 127#define default_teardown_msi_irqs NULL
136#define default_restore_msi_irqs NULL
137#endif 128#endif
138 129
139#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) 130#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
@@ -141,7 +132,7 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq);
141#endif /* __KERNEL__ */ 132#endif /* __KERNEL__ */
142 133
143#ifdef CONFIG_X86_64 134#ifdef CONFIG_X86_64
144#include <asm/pci_64.h> 135#include "pci_64.h"
145#endif 136#endif
146 137
147/* implement the pci_ DMA API in terms of the generic device dma_ one */ 138/* implement the pci_ DMA API in terms of the generic device dma_ one */
@@ -171,16 +162,4 @@ cpumask_of_pcibus(const struct pci_bus *bus)
171} 162}
172#endif 163#endif
173 164
174struct pci_setup_rom {
175 struct setup_data data;
176 uint16_t vendor;
177 uint16_t devid;
178 uint64_t pcilen;
179 unsigned long segment;
180 unsigned long bus;
181 unsigned long device;
182 unsigned long function;
183 uint8_t romdata[0];
184};
185
186#endif /* _ASM_X86_PCI_H */ 165#endif /* _ASM_X86_PCI_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 747e5a38b59..704526734be 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -7,13 +7,9 @@
7#undef DEBUG 7#undef DEBUG
8 8
9#ifdef DEBUG 9#ifdef DEBUG
10#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__) 10#define DBG(x...) printk(x)
11#else 11#else
12#define DBG(fmt, ...) \ 12#define DBG(x...)
13do { \
14 if (0) \
15 printk(fmt, ##__VA_ARGS__); \
16} while (0)
17#endif 13#endif
18 14
19#define PCI_PROBE_BIOS 0x0001 15#define PCI_PROBE_BIOS 0x0001
@@ -48,6 +44,8 @@ enum pci_bf_sort_state {
48 44
49/* pci-i386.c */ 45/* pci-i386.c */
50 46
47extern unsigned int pcibios_max_latency;
48
51void pcibios_resource_survey(void); 49void pcibios_resource_survey(void);
52void pcibios_set_cache_line_size(void); 50void pcibios_set_cache_line_size(void);
53 51
@@ -101,11 +99,10 @@ struct pci_raw_ops {
101 int reg, int len, u32 val); 99 int reg, int len, u32 val);
102}; 100};
103 101
104extern const struct pci_raw_ops *raw_pci_ops; 102extern struct pci_raw_ops *raw_pci_ops;
105extern const struct pci_raw_ops *raw_pci_ext_ops; 103extern struct pci_raw_ops *raw_pci_ext_ops;
106 104
107extern const struct pci_raw_ops pci_mmcfg; 105extern struct pci_raw_ops pci_direct_conf1;
108extern const struct pci_raw_ops pci_direct_conf1;
109extern bool port_cf9_safe; 106extern bool port_cf9_safe;
110 107
111/* arch_initcall level */ 108/* arch_initcall level */
@@ -140,11 +137,6 @@ struct pci_mmcfg_region {
140 137
141extern int __init pci_mmcfg_arch_init(void); 138extern int __init pci_mmcfg_arch_init(void);
142extern void __init pci_mmcfg_arch_free(void); 139extern void __init pci_mmcfg_arch_free(void);
143extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
144extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
145extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
146 phys_addr_t addr);
147extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
148extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus); 140extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
149 141
150extern struct list_head pci_mmcfg_list; 142extern struct list_head pci_mmcfg_list;
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0da5200ee79..3470c9d0ebb 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -46,7 +46,7 @@
46 46
47#ifdef CONFIG_SMP 47#ifdef CONFIG_SMP
48#define __percpu_prefix "%%"__stringify(__percpu_seg)":" 48#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
49#define __my_cpu_offset this_cpu_read(this_cpu_off) 49#define __my_cpu_offset percpu_read(this_cpu_off)
50 50
51/* 51/*
52 * Compared to the generic __my_cpu_offset version, the following 52 * Compared to the generic __my_cpu_offset version, the following
@@ -351,15 +351,23 @@ do { \
351}) 351})
352 352
353/* 353/*
354 * this_cpu_read() makes gcc load the percpu variable every time it is 354 * percpu_read() makes gcc load the percpu variable every time it is
355 * accessed while this_cpu_read_stable() allows the value to be cached. 355 * accessed while percpu_read_stable() allows the value to be cached.
356 * this_cpu_read_stable() is more efficient and can be used if its value 356 * percpu_read_stable() is more efficient and can be used if its value
357 * is guaranteed to be valid across cpus. The current users include 357 * is guaranteed to be valid across cpus. The current users include
358 * get_current() and get_thread_info() both of which are actually 358 * get_current() and get_thread_info() both of which are actually
359 * per-thread variables implemented as per-cpu variables and thus 359 * per-thread variables implemented as per-cpu variables and thus
360 * stable for the duration of the respective task. 360 * stable for the duration of the respective task.
361 */ 361 */
362#define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) 362#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
363#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
364#define percpu_write(var, val) percpu_to_op("mov", var, val)
365#define percpu_add(var, val) percpu_add_op(var, val)
366#define percpu_sub(var, val) percpu_add_op(var, -(val))
367#define percpu_and(var, val) percpu_to_op("and", var, val)
368#define percpu_or(var, val) percpu_to_op("or", var, val)
369#define percpu_xor(var, val) percpu_to_op("xor", var, val)
370#define percpu_inc(var) percpu_unary_op("inc", var)
363 371
364#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 372#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
365#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 373#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -406,6 +414,23 @@ do { \
406#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) 414#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
407#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) 415#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
408 416
417#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
418#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
419#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
420#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
421#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
422#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
423#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
424#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
425#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
426#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
427#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
428#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
429#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
430#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
431#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
432
433#ifndef CONFIG_M386
409#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) 434#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
410#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) 435#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
411#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) 436#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
@@ -420,20 +445,29 @@ do { \
420#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 445#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
421#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 446#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
422 447
448#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
449#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
450#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
451#endif /* !CONFIG_M386 */
452
423#ifdef CONFIG_X86_CMPXCHG64 453#ifdef CONFIG_X86_CMPXCHG64
424#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \ 454#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \
425({ \ 455({ \
426 bool __ret; \ 456 char __ret; \
427 typeof(pcp1) __o1 = (o1), __n1 = (n1); \ 457 typeof(o1) __o1 = o1; \
428 typeof(pcp2) __o2 = (o2), __n2 = (n2); \ 458 typeof(o1) __n1 = n1; \
459 typeof(o2) __o2 = o2; \
460 typeof(o2) __n2 = n2; \
461 typeof(o2) __dummy = n2; \
429 asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \ 462 asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
430 : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \ 463 : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \
431 : "b" (__n1), "c" (__n2), "a" (__o1)); \ 464 : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \
432 __ret; \ 465 __ret; \
433}) 466})
434 467
435#define __this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double 468#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
436#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double 469#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
470#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
437#endif /* CONFIG_X86_CMPXCHG64 */ 471#endif /* CONFIG_X86_CMPXCHG64 */
438 472
439/* 473/*
@@ -461,28 +495,44 @@ do { \
461#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) 495#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
462#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 496#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
463 497
498#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
499#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
500#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
501#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
502#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
503#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
504
464/* 505/*
465 * Pretty complex macro to generate cmpxchg16 instruction. The instruction 506 * Pretty complex macro to generate cmpxchg16 instruction. The instruction
466 * is not supported on early AMD64 processors so we must be able to emulate 507 * is not supported on early AMD64 processors so we must be able to emulate
467 * it in software. The address used in the cmpxchg16 instruction must be 508 * it in software. The address used in the cmpxchg16 instruction must be
468 * aligned to a 16 byte boundary. 509 * aligned to a 16 byte boundary.
469 */ 510 */
470#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \ 511#ifdef CONFIG_SMP
512#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
513#else
514#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
515#endif
516#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
471({ \ 517({ \
472 bool __ret; \ 518 char __ret; \
473 typeof(pcp1) __o1 = (o1), __n1 = (n1); \ 519 typeof(o1) __o1 = o1; \
474 typeof(pcp2) __o2 = (o2), __n2 = (n2); \ 520 typeof(o1) __n1 = n1; \
475 alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \ 521 typeof(o2) __o2 = o2; \
476 "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \ 522 typeof(o2) __n2 = n2; \
523 typeof(o2) __dummy; \
524 alternative_io(CMPXCHG16B_EMU_CALL, \
525 "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \
477 X86_FEATURE_CX16, \ 526 X86_FEATURE_CX16, \
478 ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \ 527 ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
479 "+m" (pcp2), "+d" (__o2)), \ 528 "S" (&pcp1), "b"(__n1), "c"(__n2), \
480 "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \ 529 "a"(__o1), "d"(__o2) : "memory"); \
481 __ret; \ 530 __ret; \
482}) 531})
483 532
484#define __this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double 533#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
485#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double 534#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
535#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
486 536
487#endif 537#endif
488 538
@@ -501,11 +551,7 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
501{ 551{
502 unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; 552 unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
503 553
504#ifdef CONFIG_X86_64 554 return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
505 return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0;
506#else
507 return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0;
508#endif
509} 555}
510 556
511static inline int x86_this_cpu_variable_test_bit(int nr, 557static inline int x86_this_cpu_variable_test_bit(int nr,
@@ -548,12 +594,6 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
548 { [0 ... NR_CPUS-1] = _initvalue }; \ 594 { [0 ... NR_CPUS-1] = _initvalue }; \
549 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map 595 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
550 596
551#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
552 DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
553 __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
554 { [0 ... NR_CPUS-1] = _initvalue }; \
555 __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
556
557#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ 597#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
558 EXPORT_PER_CPU_SYMBOL(_name) 598 EXPORT_PER_CPU_SYMBOL(_name)
559 599
@@ -562,11 +602,6 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
562 extern __typeof__(_type) *_name##_early_ptr; \ 602 extern __typeof__(_type) *_name##_early_ptr; \
563 extern __typeof__(_type) _name##_early_map[] 603 extern __typeof__(_type) _name##_early_map[]
564 604
565#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
566 DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
567 extern __typeof__(_type) *_name##_early_ptr; \
568 extern __typeof__(_type) _name##_early_map[]
569
570#define early_per_cpu_ptr(_name) (_name##_early_ptr) 605#define early_per_cpu_ptr(_name) (_name##_early_ptr)
571#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) 606#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
572#define early_per_cpu(_name, _cpu) \ 607#define early_per_cpu(_name, _cpu) \
@@ -578,18 +613,12 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
578#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ 613#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
579 DEFINE_PER_CPU(_type, _name) = _initvalue 614 DEFINE_PER_CPU(_type, _name) = _initvalue
580 615
581#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
582 DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
583
584#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ 616#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
585 EXPORT_PER_CPU_SYMBOL(_name) 617 EXPORT_PER_CPU_SYMBOL(_name)
586 618
587#define DECLARE_EARLY_PER_CPU(_type, _name) \ 619#define DECLARE_EARLY_PER_CPU(_type, _name) \
588 DECLARE_PER_CPU(_type, _name) 620 DECLARE_PER_CPU(_type, _name)
589 621
590#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
591 DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
592
593#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) 622#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
594#define early_per_cpu_ptr(_name) NULL 623#define early_per_cpu_ptr(_name) NULL
595/* no early_per_cpu_map() */ 624/* no early_per_cpu_map() */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 4fabcdf1cfa..094fb30817a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,10 +5,11 @@
5 * Performance event hw details: 5 * Performance event hw details:
6 */ 6 */
7 7
8#define INTEL_PMC_MAX_GENERIC 32 8#define X86_PMC_MAX_GENERIC 32
9#define INTEL_PMC_MAX_FIXED 3 9#define X86_PMC_MAX_FIXED 3
10#define INTEL_PMC_IDX_FIXED 32
11 10
11#define X86_PMC_IDX_GENERIC 0
12#define X86_PMC_IDX_FIXED 32
12#define X86_PMC_IDX_MAX 64 13#define X86_PMC_IDX_MAX 64
13 14
14#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 15#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
@@ -22,16 +23,12 @@
22#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16) 23#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
23#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17) 24#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
24#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18) 25#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
25#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
26#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20) 26#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
27#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21) 27#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
28#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) 28#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) 29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL 30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
31 31
32#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40)
33#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)
34
35#define AMD64_EVENTSEL_EVENT \ 32#define AMD64_EVENTSEL_EVENT \
36 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) 33 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
37#define INTEL_ARCH_EVENT_MASK \ 34#define INTEL_ARCH_EVENT_MASK \
@@ -46,17 +43,14 @@
46#define AMD64_RAW_EVENT_MASK \ 43#define AMD64_RAW_EVENT_MASK \
47 (X86_RAW_EVENT_MASK | \ 44 (X86_RAW_EVENT_MASK | \
48 AMD64_EVENTSEL_EVENT) 45 AMD64_EVENTSEL_EVENT)
49#define AMD64_NUM_COUNTERS 4
50#define AMD64_NUM_COUNTERS_CORE 6
51 46
52#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c 47#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
53#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 48#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
54#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 49#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
55#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ 50#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
56 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) 51 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
57 52
58#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 53#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
59#define ARCH_PERFMON_EVENTS_COUNT 7
60 54
61/* 55/*
62 * Intel "Architectural Performance Monitoring" CPUID 56 * Intel "Architectural Performance Monitoring" CPUID
@@ -72,19 +66,6 @@ union cpuid10_eax {
72 unsigned int full; 66 unsigned int full;
73}; 67};
74 68
75union cpuid10_ebx {
76 struct {
77 unsigned int no_unhalted_core_cycles:1;
78 unsigned int no_instructions_retired:1;
79 unsigned int no_unhalted_reference_cycles:1;
80 unsigned int no_llc_reference:1;
81 unsigned int no_llc_misses:1;
82 unsigned int no_branch_instruction_retired:1;
83 unsigned int no_branch_misses_retired:1;
84 } split;
85 unsigned int full;
86};
87
88union cpuid10_edx { 69union cpuid10_edx {
89 struct { 70 struct {
90 unsigned int num_counters_fixed:5; 71 unsigned int num_counters_fixed:5;
@@ -94,15 +75,6 @@ union cpuid10_edx {
94 unsigned int full; 75 unsigned int full;
95}; 76};
96 77
97struct x86_pmu_capability {
98 int version;
99 int num_counters_gp;
100 int num_counters_fixed;
101 int bit_width_gp;
102 int bit_width_fixed;
103 unsigned int events_mask;
104 int events_mask_len;
105};
106 78
107/* 79/*
108 * Fixed-purpose performance events: 80 * Fixed-purpose performance events:
@@ -111,24 +83,23 @@ struct x86_pmu_capability {
111/* 83/*
112 * All 3 fixed-mode PMCs are configured via this single MSR: 84 * All 3 fixed-mode PMCs are configured via this single MSR:
113 */ 85 */
114#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d 86#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
115 87
116/* 88/*
117 * The counts are available in three separate MSRs: 89 * The counts are available in three separate MSRs:
118 */ 90 */
119 91
120/* Instr_Retired.Any: */ 92/* Instr_Retired.Any: */
121#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 93#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
122#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0) 94#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
123 95
124/* CPU_CLK_Unhalted.Core: */ 96/* CPU_CLK_Unhalted.Core: */
125#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a 97#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
126#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1) 98#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
127 99
128/* CPU_CLK_Unhalted.Ref: */ 100/* CPU_CLK_Unhalted.Ref: */
129#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b 101#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
130#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2) 102#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
131#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
132 103
133/* 104/*
134 * We model BTS tracing as another fixed-mode PMC. 105 * We model BTS tracing as another fixed-mode PMC.
@@ -137,75 +108,33 @@ struct x86_pmu_capability {
137 * values are used by actual fixed events and higher values are used 108 * values are used by actual fixed events and higher values are used
138 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 109 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
139 */ 110 */
140#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) 111#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
141 112
142/* 113/* IbsFetchCtl bits/masks */
143 * IBS cpuid feature detection
144 */
145
146#define IBS_CPUID_FEATURES 0x8000001b
147
148/*
149 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
150 * bit 0 is used to indicate the existence of IBS.
151 */
152#define IBS_CAPS_AVAIL (1U<<0)
153#define IBS_CAPS_FETCHSAM (1U<<1)
154#define IBS_CAPS_OPSAM (1U<<2)
155#define IBS_CAPS_RDWROPCNT (1U<<3)
156#define IBS_CAPS_OPCNT (1U<<4)
157#define IBS_CAPS_BRNTRGT (1U<<5)
158#define IBS_CAPS_OPCNTEXT (1U<<6)
159#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
160
161#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
162 | IBS_CAPS_FETCHSAM \
163 | IBS_CAPS_OPSAM)
164
165/*
166 * IBS APIC setup
167 */
168#define IBSCTL 0x1cc
169#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
170#define IBSCTL_LVT_OFFSET_MASK 0x0F
171
172/* ibs fetch bits/masks */
173#define IBS_FETCH_RAND_EN (1ULL<<57) 114#define IBS_FETCH_RAND_EN (1ULL<<57)
174#define IBS_FETCH_VAL (1ULL<<49) 115#define IBS_FETCH_VAL (1ULL<<49)
175#define IBS_FETCH_ENABLE (1ULL<<48) 116#define IBS_FETCH_ENABLE (1ULL<<48)
176#define IBS_FETCH_CNT 0xFFFF0000ULL 117#define IBS_FETCH_CNT 0xFFFF0000ULL
177#define IBS_FETCH_MAX_CNT 0x0000FFFFULL 118#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
178 119
179/* ibs op bits/masks */ 120/* IbsOpCtl bits */
180/* lower 4 bits of the current count are ignored: */
181#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
182#define IBS_OP_CNT_CTL (1ULL<<19) 121#define IBS_OP_CNT_CTL (1ULL<<19)
183#define IBS_OP_VAL (1ULL<<18) 122#define IBS_OP_VAL (1ULL<<18)
184#define IBS_OP_ENABLE (1ULL<<17) 123#define IBS_OP_ENABLE (1ULL<<17)
185#define IBS_OP_MAX_CNT 0x0000FFFFULL 124#define IBS_OP_MAX_CNT 0x0000FFFFULL
186#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ 125#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
187#define IBS_RIP_INVALID (1ULL<<38)
188
189#ifdef CONFIG_X86_LOCAL_APIC
190extern u32 get_ibs_caps(void);
191#else
192static inline u32 get_ibs_caps(void) { return 0; }
193#endif
194 126
195#ifdef CONFIG_PERF_EVENTS 127#ifdef CONFIG_PERF_EVENTS
196extern void perf_events_lapic_init(void); 128extern void perf_events_lapic_init(void);
197 129
130#define PERF_EVENT_INDEX_OFFSET 0
131
198/* 132/*
199 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise 133 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
200 * unused and ABI specified to be 0, so nobody should care what we do with 134 * This flag is otherwise unused and ABI specified to be 0, so nobody should
201 * them. 135 * care what we do with it.
202 *
203 * EXACT - the IP points to the exact instruction that triggered the
204 * event (HW bugs exempt).
205 * VM - original X86_VM_MASK; see set_linear_ip().
206 */ 136 */
207#define PERF_EFLAGS_EXACT (1UL << 3) 137#define PERF_EFLAGS_EXACT (1UL << 3)
208#define PERF_EFLAGS_VM (1UL << 5)
209 138
210struct pt_regs; 139struct pt_regs;
211extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 140extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
@@ -230,38 +159,8 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
230 ); \ 159 ); \
231} 160}
232 161
233struct perf_guest_switch_msr {
234 unsigned msr;
235 u64 host, guest;
236};
237
238extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
239extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
240extern void perf_check_microcode(void);
241#else 162#else
242static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
243{
244 *nr = 0;
245 return NULL;
246}
247
248static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
249{
250 memset(cap, 0, sizeof(*cap));
251}
252
253static inline void perf_events_lapic_init(void) { } 163static inline void perf_events_lapic_init(void) { }
254static inline void perf_check_microcode(void) { }
255#endif 164#endif
256 165
257#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
258 extern void amd_pmu_enable_virt(void);
259 extern void amd_pmu_disable_virt(void);
260#else
261 static inline void amd_pmu_enable_virt(void) { }
262 static inline void amd_pmu_disable_virt(void) { }
263#endif
264
265#define arch_perf_out_copy_user copy_from_user_nmi
266
267#endif /* _ASM_X86_PERF_EVENT_H */ 166#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf160..98391db840c 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -2,9 +2,9 @@
2#define _ASM_X86_PGTABLE_2LEVEL_H 2#define _ASM_X86_PGTABLE_2LEVEL_H
3 3
4#define pte_ERROR(e) \ 4#define pte_ERROR(e) \
5 pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low) 5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
6#define pgd_ERROR(e) \ 6#define pgd_ERROR(e) \
7 pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e)) 7 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
8 8
9/* 9/*
10 * Certain architectures need to do special things when PTEs 10 * Certain architectures need to do special things when PTEs
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc..effff47a3c8 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -9,13 +9,13 @@
9 */ 9 */
10 10
11#define pte_ERROR(e) \ 11#define pte_ERROR(e) \
12 pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \ 12 printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
13 __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) 13 __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
14#define pmd_ERROR(e) \ 14#define pmd_ERROR(e) \
15 pr_err("%s:%d: bad pmd %p(%016Lx)\n", \ 15 printk("%s:%d: bad pmd %p(%016Lx).\n", \
16 __FILE__, __LINE__, &(e), pmd_val(e)) 16 __FILE__, __LINE__, &(e), pmd_val(e))
17#define pgd_ERROR(e) \ 17#define pgd_ERROR(e) \
18 pr_err("%s:%d: bad pgd %p(%016Lx)\n", \ 18 printk("%s:%d: bad pgd %p(%016Lx).\n", \
19 __FILE__, __LINE__, &(e), pgd_val(e)) 19 __FILE__, __LINE__, &(e), pgd_val(e))
20 20
21/* Rules for using set_pte: the pte being assigned *must* be 21/* Rules for using set_pte: the pte being assigned *must* be
@@ -31,60 +31,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
31 ptep->pte_low = pte.pte_low; 31 ptep->pte_low = pte.pte_low;
32} 32}
33 33
34#define pmd_read_atomic pmd_read_atomic
35/*
36 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
37 * a "*pmdp" dereference done by gcc. Problem is, in certain places
38 * where pte_offset_map_lock is called, concurrent page faults are
39 * allowed, if the mmap_sem is hold for reading. An example is mincore
40 * vs page faults vs MADV_DONTNEED. On the page fault side
41 * pmd_populate rightfully does a set_64bit, but if we're reading the
42 * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
43 * because gcc will not read the 64bit of the pmd atomically. To fix
44 * this all places running pmd_offset_map_lock() while holding the
45 * mmap_sem in read mode, shall read the pmdp pointer using this
46 * function to know if the pmd is null nor not, and in turn to know if
47 * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
48 * operations.
49 *
50 * Without THP if the mmap_sem is hold for reading, the pmd can only
51 * transition from null to not null while pmd_read_atomic runs. So
52 * we can always return atomic pmd values with this function.
53 *
54 * With THP if the mmap_sem is hold for reading, the pmd can become
55 * trans_huge or none or point to a pte (and in turn become "stable")
56 * at any time under pmd_read_atomic. We could read it really
57 * atomically here with a atomic64_read for the THP enabled case (and
58 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
59 * only return an atomic pmdval if the low part of the pmdval is later
60 * found stable (i.e. pointing to a pte). And we're returning a none
61 * pmdval if the low part of the pmd is none. In some cases the high
62 * and low part of the pmdval returned may not be consistent if THP is
63 * enabled (the low part may point to previously mapped hugepage,
64 * while the high part may point to a more recently mapped hugepage),
65 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
66 * of the pmd to be read atomically to decide if the pmd is unstable
67 * or not, with the only exception of when the low part of the pmd is
68 * zero in which case we return a none pmd.
69 */
70static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
71{
72 pmdval_t ret;
73 u32 *tmp = (u32 *)pmdp;
74
75 ret = (pmdval_t) (*tmp);
76 if (ret) {
77 /*
78 * If the low part is null, we must not read the high part
79 * or we can end up with a partial pmd.
80 */
81 smp_rmb();
82 ret |= ((pmdval_t)*(tmp + 1)) << 32;
83 }
84
85 return (pmd_t) { ret };
86}
87
88static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) 34static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
89{ 35{
90 set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); 36 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5199db2923d..18601c86fab 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -146,7 +146,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
146 146
147static inline int pmd_large(pmd_t pte) 147static inline int pmd_large(pmd_t pte)
148{ 148{
149 return pmd_flags(pte) & _PAGE_PSE; 149 return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
150 (_PAGE_PSE | _PAGE_PRESENT);
150} 151}
151 152
152#ifdef CONFIG_TRANSPARENT_HUGEPAGE 153#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -383,9 +384,9 @@ pte_t *populate_extra_pte(unsigned long vaddr);
383#endif /* __ASSEMBLY__ */ 384#endif /* __ASSEMBLY__ */
384 385
385#ifdef CONFIG_X86_32 386#ifdef CONFIG_X86_32
386# include <asm/pgtable_32.h> 387# include "pgtable_32.h"
387#else 388#else
388# include <asm/pgtable_64.h> 389# include "pgtable_64.h"
389#endif 390#endif
390 391
391#ifndef __ASSEMBLY__ 392#ifndef __ASSEMBLY__
@@ -404,14 +405,7 @@ static inline int pte_same(pte_t a, pte_t b)
404 405
405static inline int pte_present(pte_t a) 406static inline int pte_present(pte_t a)
406{ 407{
407 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | 408 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
408 _PAGE_NUMA);
409}
410
411#define pte_accessible pte_accessible
412static inline int pte_accessible(pte_t a)
413{
414 return pte_flags(a) & _PAGE_PRESENT;
415} 409}
416 410
417static inline int pte_hidden(pte_t pte) 411static inline int pte_hidden(pte_t pte)
@@ -421,14 +415,7 @@ static inline int pte_hidden(pte_t pte)
421 415
422static inline int pmd_present(pmd_t pmd) 416static inline int pmd_present(pmd_t pmd)
423{ 417{
424 /* 418 return pmd_flags(pmd) & _PAGE_PRESENT;
425 * Checking for _PAGE_PSE is needed too because
426 * split_huge_page will temporarily clear the present bit (but
427 * the _PAGE_PSE flag will remain set at all times while the
428 * _PAGE_PRESENT bit is clear).
429 */
430 return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
431 _PAGE_NUMA);
432} 419}
433 420
434static inline int pmd_none(pmd_t pmd) 421static inline int pmd_none(pmd_t pmd)
@@ -487,11 +474,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
487 474
488static inline int pmd_bad(pmd_t pmd) 475static inline int pmd_bad(pmd_t pmd)
489{ 476{
490#ifdef CONFIG_NUMA_BALANCING
491 /* pmd_numa check */
492 if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
493 return 0;
494#endif
495 return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; 477 return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
496} 478}
497 479
@@ -721,7 +703,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
721 pte_update(mm, addr, ptep); 703 pte_update(mm, addr, ptep);
722} 704}
723 705
724#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) 706#define flush_tlb_fix_spurious_fault(vma, address)
725 707
726#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 708#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
727 709
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8faa215a503..0c92113c4cb 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -71,7 +71,6 @@ do { \
71 * tables contain all the necessary information. 71 * tables contain all the necessary information.
72 */ 72 */
73#define update_mmu_cache(vma, address, ptep) do { } while (0) 73#define update_mmu_cache(vma, address, ptep) do { } while (0)
74#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
75 74
76#endif /* !__ASSEMBLY__ */ 75#endif /* !__ASSEMBLY__ */
77 76
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 47356f9df82..975f709e09a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[];
26extern void paging_init(void); 26extern void paging_init(void);
27 27
28#define pte_ERROR(e) \ 28#define pte_ERROR(e) \
29 pr_err("%s:%d: bad pte %p(%016lx)\n", \ 29 printk("%s:%d: bad pte %p(%016lx).\n", \
30 __FILE__, __LINE__, &(e), pte_val(e)) 30 __FILE__, __LINE__, &(e), pte_val(e))
31#define pmd_ERROR(e) \ 31#define pmd_ERROR(e) \
32 pr_err("%s:%d: bad pmd %p(%016lx)\n", \ 32 printk("%s:%d: bad pmd %p(%016lx).\n", \
33 __FILE__, __LINE__, &(e), pmd_val(e)) 33 __FILE__, __LINE__, &(e), pmd_val(e))
34#define pud_ERROR(e) \ 34#define pud_ERROR(e) \
35 pr_err("%s:%d: bad pud %p(%016lx)\n", \ 35 printk("%s:%d: bad pud %p(%016lx).\n", \
36 __FILE__, __LINE__, &(e), pud_val(e)) 36 __FILE__, __LINE__, &(e), pud_val(e))
37#define pgd_ERROR(e) \ 37#define pgd_ERROR(e) \
38 pr_err("%s:%d: bad pgd %p(%016lx)\n", \ 38 printk("%s:%d: bad pgd %p(%016lx).\n", \
39 __FILE__, __LINE__, &(e), pgd_val(e)) 39 __FILE__, __LINE__, &(e), pgd_val(e))
40 40
41struct mm_struct; 41struct mm_struct;
@@ -143,7 +143,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
143#define pte_unmap(pte) ((void)(pte))/* NOP */ 143#define pte_unmap(pte) ((void)(pte))/* NOP */
144 144
145#define update_mmu_cache(vma, address, ptep) do { } while (0) 145#define update_mmu_cache(vma, address, ptep) do { } while (0)
146#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
147 146
148/* Encode and de-code a swap entry */ 147/* Encode and de-code a swap entry */
149#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE 148#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 3c32db8c539..013286a10c2 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -64,26 +64,6 @@
64#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) 64#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
65#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) 65#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
66 66
67/*
68 * _PAGE_NUMA indicates that this page will trigger a numa hinting
69 * minor page fault to gather numa placement statistics (see
70 * pte_numa()). The bit picked (8) is within the range between
71 * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
72 * require changes to the swp entry format because that bit is always
73 * zero when the pte is not present.
74 *
75 * The bit picked must be always zero when the pmd is present and not
76 * present, so that we don't lose information when we set it while
77 * atomically clearing the present bit.
78 *
79 * Because we shared the same bit (8) with _PAGE_PROTNONE this can be
80 * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
81 * couldn't reach, like handle_mm_fault() (see access_error in
82 * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for
83 * handle_mm_fault() to be invoked).
84 */
85#define _PAGE_NUMA _PAGE_PROTNONE
86
87#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ 67#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
88 _PAGE_ACCESSED | _PAGE_DIRTY) 68 _PAGE_ACCESSED | _PAGE_DIRTY)
89#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ 69#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
@@ -194,9 +174,9 @@
194#endif 174#endif
195 175
196#ifdef CONFIG_X86_32 176#ifdef CONFIG_X86_32
197# include <asm/pgtable_32_types.h> 177# include "pgtable_32_types.h"
198#else 178#else
199# include <asm/pgtable_64_types.h> 179# include "pgtable_64_types.h"
200#endif 180#endif
201 181
202#ifndef __ASSEMBLY__ 182#ifndef __ASSEMBLY__
@@ -323,9 +303,11 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pte);
323 303
324extern void native_pagetable_reserve(u64 start, u64 end); 304extern void native_pagetable_reserve(u64 start, u64 end);
325#ifdef CONFIG_X86_32 305#ifdef CONFIG_X86_32
326extern void native_pagetable_init(void); 306extern void native_pagetable_setup_start(pgd_t *base);
307extern void native_pagetable_setup_done(pgd_t *base);
327#else 308#else
328#define native_pagetable_init paging_init 309#define native_pagetable_setup_start x86_init_pgd_noop
310#define native_pagetable_setup_done x86_init_pgd_noop
329#endif 311#endif
330 312
331struct seq_file; 313struct seq_file;
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index f565f6dd59d..bb7133dc155 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -1,5 +1,13 @@
1#ifdef __KERNEL__
1# ifdef CONFIG_X86_32 2# ifdef CONFIG_X86_32
2# include <asm/posix_types_32.h> 3# include "posix_types_32.h"
3# else 4# else
4# include <asm/posix_types_64.h> 5# include "posix_types_64.h"
5# endif 6# endif
7#else
8# ifdef __i386__
9# include "posix_types_32.h"
10# else
11# include "posix_types_64.h"
12# endif
13#endif
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 39fb618e221..2dddb317bb3 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -1,11 +1,102 @@
1#ifndef _ASM_X86_PROCESSOR_FLAGS_H 1#ifndef _ASM_X86_PROCESSOR_FLAGS_H
2#define _ASM_X86_PROCESSOR_FLAGS_H 2#define _ASM_X86_PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */
3 4
4#include <uapi/asm/processor-flags.h> 5/*
6 * EFLAGS bits
7 */
8#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
9#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
10#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
11#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
12#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
13#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
14#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
15#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
16#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
17#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
18#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
19#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
20#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
21#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
22#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
23#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
24#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
5 25
26/*
27 * Basic CPU control in CR0
28 */
29#define X86_CR0_PE 0x00000001 /* Protection Enable */
30#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
31#define X86_CR0_EM 0x00000004 /* Emulation */
32#define X86_CR0_TS 0x00000008 /* Task Switched */
33#define X86_CR0_ET 0x00000010 /* Extension Type */
34#define X86_CR0_NE 0x00000020 /* Numeric Error */
35#define X86_CR0_WP 0x00010000 /* Write Protect */
36#define X86_CR0_AM 0x00040000 /* Alignment Mask */
37#define X86_CR0_NW 0x20000000 /* Not Write-through */
38#define X86_CR0_CD 0x40000000 /* Cache Disable */
39#define X86_CR0_PG 0x80000000 /* Paging */
40
41/*
42 * Paging options in CR3
43 */
44#define X86_CR3_PWT 0x00000008 /* Page Write Through */
45#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
46
47/*
48 * Intel CPU features in CR4
49 */
50#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
51#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
52#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
53#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
54#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
55#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
56#define X86_CR4_MCE 0x00000040 /* Machine check enable */
57#define X86_CR4_PGE 0x00000080 /* enable global pages */
58#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
59#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
60#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
61#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
62#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
63#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
64#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
65
66/*
67 * x86-64 Task Priority Register, CR8
68 */
69#define X86_CR8_TPR 0x0000000F /* task priority register */
70
71/*
72 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
73 */
74
75/*
76 * NSC/Cyrix CPU configuration register indexes
77 */
78#define CX86_PCR0 0x20
79#define CX86_GCR 0xb8
80#define CX86_CCR0 0xc0
81#define CX86_CCR1 0xc1
82#define CX86_CCR2 0xc2
83#define CX86_CCR3 0xc3
84#define CX86_CCR4 0xe8
85#define CX86_CCR5 0xe9
86#define CX86_CCR6 0xea
87#define CX86_CCR7 0xeb
88#define CX86_PCR1 0xf0
89#define CX86_DIR0 0xfe
90#define CX86_DIR1 0xff
91#define CX86_ARR_BASE 0xc4
92#define CX86_RCR_BASE 0xdc
93
94#ifdef __KERNEL__
6#ifdef CONFIG_VM86 95#ifdef CONFIG_VM86
7#define X86_VM_MASK X86_EFLAGS_VM 96#define X86_VM_MASK X86_EFLAGS_VM
8#else 97#else
9#define X86_VM_MASK 0 /* No VM86 support */ 98#define X86_VM_MASK 0 /* No VM86 support */
10#endif 99#endif
100#endif
101
11#endif /* _ASM_X86_PROCESSOR_FLAGS_H */ 102#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b2fc8..0d1171c9772 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -14,13 +14,13 @@ struct mm_struct;
14#include <asm/sigcontext.h> 14#include <asm/sigcontext.h>
15#include <asm/current.h> 15#include <asm/current.h>
16#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
17#include <asm/system.h>
17#include <asm/page.h> 18#include <asm/page.h>
18#include <asm/pgtable_types.h> 19#include <asm/pgtable_types.h>
19#include <asm/percpu.h> 20#include <asm/percpu.h>
20#include <asm/msr.h> 21#include <asm/msr.h>
21#include <asm/desc_defs.h> 22#include <asm/desc_defs.h>
22#include <asm/nops.h> 23#include <asm/nops.h>
23#include <asm/special_insns.h>
24 24
25#include <linux/personality.h> 25#include <linux/personality.h>
26#include <linux/cpumask.h> 26#include <linux/cpumask.h>
@@ -29,15 +29,6 @@ struct mm_struct;
29#include <linux/math64.h> 29#include <linux/math64.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/irqflags.h>
33
34/*
35 * We handle most unaligned accesses in hardware. On the other hand
36 * unaligned DMA can be quite expensive on some Nehalem processors.
37 *
38 * Based on this we disable the IP header alignment in network drivers.
39 */
40#define NET_IP_ALIGN 0
41 32
42#define HBP_NUM 4 33#define HBP_NUM 4
43/* 34/*
@@ -61,19 +52,6 @@ static inline void *current_text_addr(void)
61# define ARCH_MIN_MMSTRUCT_ALIGN 0 52# define ARCH_MIN_MMSTRUCT_ALIGN 0
62#endif 53#endif
63 54
64enum tlb_infos {
65 ENTRIES,
66 NR_INFO
67};
68
69extern u16 __read_mostly tlb_lli_4k[NR_INFO];
70extern u16 __read_mostly tlb_lli_2m[NR_INFO];
71extern u16 __read_mostly tlb_lli_4m[NR_INFO];
72extern u16 __read_mostly tlb_lld_4k[NR_INFO];
73extern u16 __read_mostly tlb_lld_2m[NR_INFO];
74extern u16 __read_mostly tlb_lld_4m[NR_INFO];
75extern s8 __read_mostly tlb_flushall_shift;
76
77/* 55/*
78 * CPU type and hardware bug flags. Kept separately for each CPU. 56 * CPU type and hardware bug flags. Kept separately for each CPU.
79 * Members of this structure are referenced in head.S, so think twice 57 * Members of this structure are referenced in head.S, so think twice
@@ -121,6 +99,7 @@ struct cpuinfo_x86 {
121 u16 apicid; 99 u16 apicid;
122 u16 initial_apicid; 100 u16 initial_apicid;
123 u16 x86_clflush_size; 101 u16 x86_clflush_size;
102#ifdef CONFIG_SMP
124 /* number of cores as seen by the OS: */ 103 /* number of cores as seen by the OS: */
125 u16 booted_cores; 104 u16 booted_cores;
126 /* Physical processor id: */ 105 /* Physical processor id: */
@@ -131,7 +110,7 @@ struct cpuinfo_x86 {
131 u8 compute_unit_id; 110 u8 compute_unit_id;
132 /* Index into per_cpu list: */ 111 /* Index into per_cpu list: */
133 u16 cpu_index; 112 u16 cpu_index;
134 u32 microcode; 113#endif
135} __attribute__((__aligned__(SMP_CACHE_BYTES))); 114} __attribute__((__aligned__(SMP_CACHE_BYTES)));
136 115
137#define X86_VENDOR_INTEL 0 116#define X86_VENDOR_INTEL 0
@@ -178,14 +157,15 @@ static inline int hlt_works(int cpu)
178 157
179extern void cpu_detect(struct cpuinfo_x86 *c); 158extern void cpu_detect(struct cpuinfo_x86 *c);
180 159
160extern struct pt_regs *idle_regs(struct pt_regs *);
161
181extern void early_cpu_init(void); 162extern void early_cpu_init(void);
182extern void identify_boot_cpu(void); 163extern void identify_boot_cpu(void);
183extern void identify_secondary_cpu(struct cpuinfo_x86 *); 164extern void identify_secondary_cpu(struct cpuinfo_x86 *);
184extern void print_cpu_info(struct cpuinfo_x86 *); 165extern void print_cpu_info(struct cpuinfo_x86 *);
185void print_cpu_msr(struct cpuinfo_x86 *);
186extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); 166extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
187extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 167extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
188extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); 168extern unsigned short num_cache_leaves;
189 169
190extern void detect_extended_topology(struct cpuinfo_x86 *c); 170extern void detect_extended_topology(struct cpuinfo_x86 *c);
191extern void detect_ht(struct cpuinfo_x86 *c); 171extern void detect_ht(struct cpuinfo_x86 *c);
@@ -199,8 +179,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
199 "=b" (*ebx), 179 "=b" (*ebx),
200 "=c" (*ecx), 180 "=c" (*ecx),
201 "=d" (*edx) 181 "=d" (*edx)
202 : "0" (*eax), "2" (*ecx) 182 : "0" (*eax), "2" (*ecx));
203 : "memory");
204} 183}
205 184
206static inline void load_cr3(pgd_t *pgdir) 185static inline void load_cr3(pgd_t *pgdir)
@@ -395,8 +374,6 @@ union thread_xstate {
395}; 374};
396 375
397struct fpu { 376struct fpu {
398 unsigned int last_cpu;
399 unsigned int has_fpu;
400 union thread_xstate *state; 377 union thread_xstate *state;
401}; 378};
402 379
@@ -421,6 +398,7 @@ DECLARE_INIT_PER_CPU(irq_stack_union);
421 398
422DECLARE_PER_CPU(char *, irq_stack_ptr); 399DECLARE_PER_CPU(char *, irq_stack_ptr);
423DECLARE_PER_CPU(unsigned int, irq_count); 400DECLARE_PER_CPU(unsigned int, irq_count);
401extern unsigned long kernel_eflags;
424extern asmlinkage void ignore_sysret(void); 402extern asmlinkage void ignore_sysret(void);
425#else /* X86_64 */ 403#else /* X86_64 */
426#ifdef CONFIG_CC_STACKPROTECTOR 404#ifdef CONFIG_CC_STACKPROTECTOR
@@ -473,7 +451,7 @@ struct thread_struct {
473 unsigned long ptrace_dr7; 451 unsigned long ptrace_dr7;
474 /* Fault info: */ 452 /* Fault info: */
475 unsigned long cr2; 453 unsigned long cr2;
476 unsigned long trap_nr; 454 unsigned long trap_no;
477 unsigned long error_code; 455 unsigned long error_code;
478 /* floating point and extended processor state */ 456 /* floating point and extended processor state */
479 struct fpu fpu; 457 struct fpu fpu;
@@ -494,6 +472,61 @@ struct thread_struct {
494 unsigned io_bitmap_max; 472 unsigned io_bitmap_max;
495}; 473};
496 474
475static inline unsigned long native_get_debugreg(int regno)
476{
477 unsigned long val = 0; /* Damn you, gcc! */
478
479 switch (regno) {
480 case 0:
481 asm("mov %%db0, %0" :"=r" (val));
482 break;
483 case 1:
484 asm("mov %%db1, %0" :"=r" (val));
485 break;
486 case 2:
487 asm("mov %%db2, %0" :"=r" (val));
488 break;
489 case 3:
490 asm("mov %%db3, %0" :"=r" (val));
491 break;
492 case 6:
493 asm("mov %%db6, %0" :"=r" (val));
494 break;
495 case 7:
496 asm("mov %%db7, %0" :"=r" (val));
497 break;
498 default:
499 BUG();
500 }
501 return val;
502}
503
504static inline void native_set_debugreg(int regno, unsigned long value)
505{
506 switch (regno) {
507 case 0:
508 asm("mov %0, %%db0" ::"r" (value));
509 break;
510 case 1:
511 asm("mov %0, %%db1" ::"r" (value));
512 break;
513 case 2:
514 asm("mov %0, %%db2" ::"r" (value));
515 break;
516 case 3:
517 asm("mov %0, %%db3" ::"r" (value));
518 break;
519 case 6:
520 asm("mov %0, %%db6" ::"r" (value));
521 break;
522 case 7:
523 asm("mov %0, %%db7" ::"r" (value));
524 break;
525 default:
526 BUG();
527 }
528}
529
497/* 530/*
498 * Set IOPL bits in EFLAGS from given mask 531 * Set IOPL bits in EFLAGS from given mask
499 */ 532 */
@@ -539,6 +572,14 @@ static inline void native_swapgs(void)
539#define __cpuid native_cpuid 572#define __cpuid native_cpuid
540#define paravirt_enabled() 0 573#define paravirt_enabled() 0
541 574
575/*
576 * These special macros can be used to get or set a debugging register
577 */
578#define get_debugreg(var, register) \
579 (var) = native_get_debugreg(register)
580#define set_debugreg(value, register) \
581 native_set_debugreg(register, value)
582
542static inline void load_sp0(struct tss_struct *tss, 583static inline void load_sp0(struct tss_struct *tss,
543 struct thread_struct *thread) 584 struct thread_struct *thread)
544{ 585{
@@ -554,16 +595,13 @@ static inline void load_sp0(struct tss_struct *tss,
554 * enable), so that any CPU's that boot up 595 * enable), so that any CPU's that boot up
555 * after us can get the correct flags. 596 * after us can get the correct flags.
556 */ 597 */
557extern unsigned long mmu_cr4_features; 598extern unsigned long mmu_cr4_features;
558extern u32 *trampoline_cr4_features;
559 599
560static inline void set_in_cr4(unsigned long mask) 600static inline void set_in_cr4(unsigned long mask)
561{ 601{
562 unsigned long cr4; 602 unsigned long cr4;
563 603
564 mmu_cr4_features |= mask; 604 mmu_cr4_features |= mask;
565 if (trampoline_cr4_features)
566 *trampoline_cr4_features = mmu_cr4_features;
567 cr4 = read_cr4(); 605 cr4 = read_cr4();
568 cr4 |= mask; 606 cr4 |= mask;
569 write_cr4(cr4); 607 write_cr4(cr4);
@@ -574,8 +612,6 @@ static inline void clear_in_cr4(unsigned long mask)
574 unsigned long cr4; 612 unsigned long cr4;
575 613
576 mmu_cr4_features &= ~mask; 614 mmu_cr4_features &= ~mask;
577 if (trampoline_cr4_features)
578 *trampoline_cr4_features = mmu_cr4_features;
579 cr4 = read_cr4(); 615 cr4 = read_cr4();
580 cr4 &= ~mask; 616 cr4 &= ~mask;
581 write_cr4(cr4); 617 write_cr4(cr4);
@@ -586,9 +622,17 @@ typedef struct {
586} mm_segment_t; 622} mm_segment_t;
587 623
588 624
625/*
626 * create a kernel thread without removing it from tasklists
627 */
628extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
629
589/* Free all resources held by a thread. */ 630/* Free all resources held by a thread. */
590extern void release_thread(struct task_struct *); 631extern void release_thread(struct task_struct *);
591 632
633/* Prepare to copy thread state - unlazy all lazy state */
634extern void prepare_to_copy(struct task_struct *tsk);
635
592unsigned long get_wchan(struct task_struct *p); 636unsigned long get_wchan(struct task_struct *p);
593 637
594/* 638/*
@@ -670,29 +714,18 @@ static inline void sync_core(void)
670{ 714{
671 int tmp; 715 int tmp;
672 716
673#ifdef CONFIG_M486 717#if defined(CONFIG_M386) || defined(CONFIG_M486)
674 /* 718 if (boot_cpu_data.x86 < 5)
675 * Do a CPUID if available, otherwise do a jump. The jump 719 /* There is no speculative execution.
676 * can conveniently enough be the jump around CPUID. 720 * jmp is a barrier to prefetching. */
677 */ 721 asm volatile("jmp 1f\n1:\n" ::: "memory");
678 asm volatile("cmpl %2,%1\n\t" 722 else
679 "jl 1f\n\t"
680 "cpuid\n"
681 "1:"
682 : "=a" (tmp)
683 : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
684 : "ebx", "ecx", "edx", "memory");
685#else
686 /*
687 * CPUID is a barrier to speculative execution.
688 * Prefetched instructions are automatically
689 * invalidated when modified.
690 */
691 asm volatile("cpuid"
692 : "=a" (tmp)
693 : "0" (1)
694 : "ebx", "ecx", "edx", "memory");
695#endif 723#endif
724 /* cpuid is a barrier to speculative execution.
725 * Prefetched instructions are automatically
726 * invalidated when modified. */
727 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
728 : "ebx", "ecx", "edx", "memory");
696} 729}
697 730
698static inline void __monitor(const void *eax, unsigned long ecx, 731static inline void __monitor(const void *eax, unsigned long ecx,
@@ -762,8 +795,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
762 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 795 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
763} 796}
764 797
765extern void set_task_blockstep(struct task_struct *task, bool on);
766
767/* 798/*
768 * from system description table in BIOS. Mostly for MCA use, but 799 * from system description table in BIOS. Mostly for MCA use, but
769 * others may find it useful: 800 * others may find it useful:
@@ -893,9 +924,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
893#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ 924#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
894 0xc0000000 : 0xFFFFe000) 925 0xc0000000 : 0xFFFFe000)
895 926
896#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ 927#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
897 IA32_PAGE_OFFSET : TASK_SIZE_MAX) 928 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
898#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \ 929#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
899 IA32_PAGE_OFFSET : TASK_SIZE_MAX) 930 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
900 931
901#define STACK_TOP TASK_SIZE 932#define STACK_TOP TASK_SIZE
@@ -917,12 +948,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
917 948
918#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) 949#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
919extern unsigned long KSTK_ESP(struct task_struct *task); 950extern unsigned long KSTK_ESP(struct task_struct *task);
920
921/*
922 * User space RSP while inside the SYSCALL fast path
923 */
924DECLARE_PER_CPU(unsigned long, old_rsp);
925
926#endif /* CONFIG_X86_64 */ 951#endif /* CONFIG_X86_64 */
927 952
928extern void start_thread(struct pt_regs *regs, unsigned long new_ip, 953extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -994,12 +1019,4 @@ extern bool cpu_has_amd_erratum(const int *);
994#define cpu_has_amd_erratum(x) (false) 1019#define cpu_has_amd_erratum(x) (false)
995#endif /* CONFIG_CPU_SUP_AMD */ 1020#endif /* CONFIG_CPU_SUP_AMD */
996 1021
997extern unsigned long arch_align_stack(unsigned long sp);
998extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
999
1000void default_idle(void);
1001bool set_pm_idle_to_default(void);
1002
1003void stop_this_cpu(void *dummy);
1004
1005#endif /* _ASM_X86_PROCESSOR_H */ 1022#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 60bef663609..644dd885f05 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -21,6 +21,7 @@
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/irq_controller.h>
24 25
25#ifdef CONFIG_OF 26#ifdef CONFIG_OF
26extern int of_ioapic; 27extern int of_ioapic;
@@ -42,6 +43,15 @@ extern char cmd_line[COMMAND_LINE_SIZE];
42#define pci_address_to_pio pci_address_to_pio 43#define pci_address_to_pio pci_address_to_pio
43unsigned long pci_address_to_pio(phys_addr_t addr); 44unsigned long pci_address_to_pio(phys_addr_t addr);
44 45
46/**
47 * irq_dispose_mapping - Unmap an interrupt
48 * @virq: linux virq number of the interrupt to unmap
49 *
50 * FIXME: We really should implement proper virq handling like power,
51 * but that's going to be major surgery.
52 */
53static inline void irq_dispose_mapping(unsigned int virq) { }
54
45#define HAVE_ARCH_DEVTREE_FIXUPS 55#define HAVE_ARCH_DEVTREE_FIXUPS
46 56
47#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 942a08623a1..35664547125 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -1,12 +1,44 @@
1#ifndef _ASM_X86_PTRACE_H 1#ifndef _ASM_X86_PTRACE_H
2#define _ASM_X86_PTRACE_H 2#define _ASM_X86_PTRACE_H
3 3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6#include <asm/processor-flags.h>
7
8#ifdef __KERNEL__
4#include <asm/segment.h> 9#include <asm/segment.h>
5#include <asm/page_types.h> 10#include <asm/page_types.h>
6#include <uapi/asm/ptrace.h> 11#endif
7 12
8#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14
9#ifdef __i386__ 15#ifdef __i386__
16/* this struct defines the way the registers are stored on the
17 stack during a system call. */
18
19#ifndef __KERNEL__
20
21struct pt_regs {
22 long ebx;
23 long ecx;
24 long edx;
25 long esi;
26 long edi;
27 long ebp;
28 long eax;
29 int xds;
30 int xes;
31 int xfs;
32 int xgs;
33 long orig_eax;
34 long eip;
35 int xcs;
36 long eflags;
37 long esp;
38 int xss;
39};
40
41#else /* __KERNEL__ */
10 42
11struct pt_regs { 43struct pt_regs {
12 unsigned long bx; 44 unsigned long bx;
@@ -28,8 +60,42 @@ struct pt_regs {
28 unsigned long ss; 60 unsigned long ss;
29}; 61};
30 62
63#endif /* __KERNEL__ */
64
31#else /* __i386__ */ 65#else /* __i386__ */
32 66
67#ifndef __KERNEL__
68
69struct pt_regs {
70 unsigned long r15;
71 unsigned long r14;
72 unsigned long r13;
73 unsigned long r12;
74 unsigned long rbp;
75 unsigned long rbx;
76/* arguments: non interrupts/non tracing syscalls only save up to here*/
77 unsigned long r11;
78 unsigned long r10;
79 unsigned long r9;
80 unsigned long r8;
81 unsigned long rax;
82 unsigned long rcx;
83 unsigned long rdx;
84 unsigned long rsi;
85 unsigned long rdi;
86 unsigned long orig_rax;
87/* end of arguments */
88/* cpu exception frame or undefined */
89 unsigned long rip;
90 unsigned long cs;
91 unsigned long eflags;
92 unsigned long rsp;
93 unsigned long ss;
94/* top of stack page */
95};
96
97#else /* __KERNEL__ */
98
33struct pt_regs { 99struct pt_regs {
34 unsigned long r15; 100 unsigned long r15;
35 unsigned long r14; 101 unsigned long r14;
@@ -58,8 +124,12 @@ struct pt_regs {
58/* top of stack page */ 124/* top of stack page */
59}; 125};
60 126
127#endif /* __KERNEL__ */
61#endif /* !__i386__ */ 128#endif /* !__i386__ */
62 129
130
131#ifdef __KERNEL__
132
63#include <linux/init.h> 133#include <linux/init.h>
64#ifdef CONFIG_PARAVIRT 134#ifdef CONFIG_PARAVIRT
65#include <asm/paravirt_types.h> 135#include <asm/paravirt_types.h>
@@ -75,6 +145,7 @@ extern unsigned long
75convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); 145convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
76extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 146extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
77 int error_code, int si_code); 147 int error_code, int si_code);
148void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
78 149
79extern long syscall_trace_enter(struct pt_regs *); 150extern long syscall_trace_enter(struct pt_regs *);
80extern void syscall_trace_leave(struct pt_regs *); 151extern void syscall_trace_leave(struct pt_regs *);
@@ -133,23 +204,23 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
133 return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; 204 return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
134#endif 205#endif
135} 206}
136
137#define current_user_stack_pointer() this_cpu_read(old_rsp)
138/* ia32 vs. x32 difference */
139#define compat_user_stack_pointer() \
140 (test_thread_flag(TIF_IA32) \
141 ? current_pt_regs()->sp \
142 : this_cpu_read(old_rsp))
143#endif 207#endif
144 208
145#ifdef CONFIG_X86_32 209/*
146extern unsigned long kernel_stack_pointer(struct pt_regs *regs); 210 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
147#else 211 * when it traps. The previous stack will be directly underneath the saved
212 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
213 *
214 * This is valid only for kernel mode traps.
215 */
148static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) 216static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
149{ 217{
218#ifdef CONFIG_X86_32
219 return (unsigned long)(&regs->sp);
220#else
150 return regs->sp; 221 return regs->sp;
151}
152#endif 222#endif
223}
153 224
154#define GET_IP(regs) ((regs)->ip) 225#define GET_IP(regs) ((regs)->ip)
155#define GET_FP(regs) ((regs)->bp) 226#define GET_FP(regs) ((regs)->bp)
@@ -176,15 +247,6 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
176{ 247{
177 if (unlikely(offset > MAX_REG_OFFSET)) 248 if (unlikely(offset > MAX_REG_OFFSET))
178 return 0; 249 return 0;
179#ifdef CONFIG_X86_32
180 /*
181 * Traps from the kernel do not save sp and ss.
182 * Use the helper function to retrieve sp.
183 */
184 if (offset == offsetof(struct pt_regs, sp) &&
185 regs->cs == __KERNEL_CS)
186 return kernel_stack_pointer(regs);
187#endif
188 return *(unsigned long *)((unsigned long)regs + offset); 250 return *(unsigned long *)((unsigned long)regs + offset);
189} 251}
190 252
@@ -238,5 +300,8 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
238extern int do_set_thread_area(struct task_struct *p, int idx, 300extern int do_set_thread_area(struct task_struct *p, int idx,
239 struct user_desc __user *info, int can_allocate); 301 struct user_desc __user *info, int can_allocate);
240 302
303#endif /* __KERNEL__ */
304
241#endif /* !__ASSEMBLY__ */ 305#endif /* !__ASSEMBLY__ */
306
242#endif /* _ASM_X86_PTRACE_H */ 307#endif /* _ASM_X86_PTRACE_H */
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 6167fd79818..35f2d1948ad 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -40,6 +40,5 @@ struct pvclock_wall_clock {
40} __attribute__((__packed__)); 40} __attribute__((__packed__));
41 41
42#define PVCLOCK_TSC_STABLE_BIT (1 << 0) 42#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
43#define PVCLOCK_GUEST_STOPPED (1 << 1)
44#endif /* __ASSEMBLY__ */ 43#endif /* __ASSEMBLY__ */
45#endif /* _ASM_X86_PVCLOCK_ABI_H */ 44#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 109a9dd5d45..c59cc97fe6c 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,7 +6,6 @@
6 6
7/* some helper functions for xen and kvm pv clock sources */ 7/* some helper functions for xen and kvm pv clock sources */
8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 8cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
9u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
10void pvclock_set_flags(u8 flags); 9void pvclock_set_flags(u8 flags);
11unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); 10unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
12void pvclock_read_wallclock(struct pvclock_wall_clock *wall, 11void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
@@ -57,50 +56,4 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
57 return product; 56 return product;
58} 57}
59 58
60static __always_inline
61u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
62{
63 u64 delta = __native_read_tsc() - src->tsc_timestamp;
64 return pvclock_scale_delta(delta, src->tsc_to_system_mul,
65 src->tsc_shift);
66}
67
68static __always_inline
69unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
70 cycle_t *cycles, u8 *flags)
71{
72 unsigned version;
73 cycle_t ret, offset;
74 u8 ret_flags;
75
76 version = src->version;
77 /* Note: emulated platforms which do not advertise SSE2 support
78 * result in kvmclock not using the necessary RDTSC barriers.
79 * Without barriers, it is possible that RDTSC instruction reads from
80 * the time stamp counter outside rdtsc_barrier protected section
81 * below, resulting in violation of monotonicity.
82 */
83 rdtsc_barrier();
84 offset = pvclock_get_nsec_offset(src);
85 ret = src->system_time + offset;
86 ret_flags = src->flags;
87 rdtsc_barrier();
88
89 *cycles = ret;
90 *flags = ret_flags;
91 return version;
92}
93
94struct pvclock_vsyscall_time_info {
95 struct pvclock_vcpu_time_info pvti;
96 u32 migrate_count;
97} __attribute__((__aligned__(SMP_CACHE_BYTES)));
98
99#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
100#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
101
102int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
103 int size);
104struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
105
106#endif /* _ASM_X86_PVCLOCK_H */ 59#endif /* _ASM_X86_PVCLOCK_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
deleted file mode 100644
index fe1ec5bcd84..00000000000
--- a/arch/x86/include/asm/realmode.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef _ARCH_X86_REALMODE_H
2#define _ARCH_X86_REALMODE_H
3
4#include <linux/types.h>
5#include <asm/io.h>
6
7/* This must match data at realmode.S */
8struct real_mode_header {
9 u32 text_start;
10 u32 ro_end;
11 /* SMP trampoline */
12 u32 trampoline_start;
13 u32 trampoline_status;
14 u32 trampoline_header;
15#ifdef CONFIG_X86_64
16 u32 trampoline_pgd;
17#endif
18 /* ACPI S3 wakeup */
19#ifdef CONFIG_ACPI_SLEEP
20 u32 wakeup_start;
21 u32 wakeup_header;
22#endif
23 /* APM/BIOS reboot */
24 u32 machine_real_restart_asm;
25#ifdef CONFIG_X86_64
26 u32 machine_real_restart_seg;
27#endif
28};
29
30/* This must match data at trampoline_32/64.S */
31struct trampoline_header {
32#ifdef CONFIG_X86_32
33 u32 start;
34 u16 gdt_pad;
35 u16 gdt_limit;
36 u32 gdt_base;
37#else
38 u64 start;
39 u64 efer;
40 u32 cr4;
41#endif
42};
43
44extern struct real_mode_header *real_mode_header;
45extern unsigned char real_mode_blob_end[];
46
47extern unsigned long init_rsp;
48extern unsigned long initial_code;
49extern unsigned long initial_gs;
50
51extern unsigned char real_mode_blob[];
52extern unsigned char real_mode_relocs[];
53
54#ifdef CONFIG_X86_32
55extern unsigned char startup_32_smp[];
56extern unsigned char boot_gdt[];
57#else
58extern unsigned char secondary_startup_64[];
59#endif
60
61extern void __init setup_real_mode(void);
62
63#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index a82c4f1b4d8..3250e3d605d 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -18,12 +18,12 @@ extern struct machine_ops machine_ops;
18 18
19void native_machine_crash_shutdown(struct pt_regs *regs); 19void native_machine_crash_shutdown(struct pt_regs *regs);
20void native_machine_shutdown(void); 20void native_machine_shutdown(void);
21void __noreturn machine_real_restart(unsigned int type); 21void machine_real_restart(unsigned int type);
22/* These must match dispatch in arch/x86/realmore/rm/reboot.S */ 22/* These must match dispatch_table in reboot_32.S */
23#define MRR_BIOS 0 23#define MRR_BIOS 0
24#define MRR_APM 1 24#define MRR_APM 1
25 25
26typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); 26typedef void (*nmi_shootdown_cb)(int, struct die_args*);
27void nmi_shootdown_cpus(nmi_shootdown_cb callback); 27void nmi_shootdown_cpus(nmi_shootdown_cb callback);
28 28
29#endif /* _ASM_X86_REBOOT_H */ 29#endif /* _ASM_X86_REBOOT_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 2dbe4a721ce..df4cd32b4cc 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -204,7 +204,13 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
204 */ 204 */
205static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) 205static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
206{ 206{
207 return delta + xadd(&sem->count, delta); 207 long tmp = delta;
208
209 asm volatile(LOCK_PREFIX "xadd %0,%1"
210 : "+r" (tmp), "+m" (sem->count)
211 : : "memory");
212
213 return tmp + delta;
208} 214}
209 215
210#endif /* __KERNEL__ */ 216#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index 0f3d7f09922..c62e58a5a90 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/seccomp_32.h> 2# include "seccomp_32.h"
3#else 3#else
4# include <asm/seccomp_64.h> 4# include "seccomp_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index c48a95035a7..5e641715c3f 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -205,70 +205,14 @@
205 205
206#define IDT_ENTRIES 256 206#define IDT_ENTRIES 256
207#define NUM_EXCEPTION_VECTORS 32 207#define NUM_EXCEPTION_VECTORS 32
208/* Bitmask of exception vectors which push an error code on the stack */
209#define EXCEPTION_ERRCODE_MASK 0x00027d00
210#define GDT_SIZE (GDT_ENTRIES * 8) 208#define GDT_SIZE (GDT_ENTRIES * 8)
211#define GDT_ENTRY_TLS_ENTRIES 3 209#define GDT_ENTRY_TLS_ENTRIES 3
212#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) 210#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
213 211
214#ifdef __KERNEL__ 212#ifdef __KERNEL__
215#ifndef __ASSEMBLY__ 213#ifndef __ASSEMBLY__
216extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; 214extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
217 215#endif
218/* 216#endif
219 * Load a segment. Fall back on loading the zero
220 * segment if something goes wrong..
221 */
222#define loadsegment(seg, value) \
223do { \
224 unsigned short __val = (value); \
225 \
226 asm volatile(" \n" \
227 "1: movl %k0,%%" #seg " \n" \
228 \
229 ".section .fixup,\"ax\" \n" \
230 "2: xorl %k0,%k0 \n" \
231 " jmp 1b \n" \
232 ".previous \n" \
233 \
234 _ASM_EXTABLE(1b, 2b) \
235 \
236 : "+r" (__val) : : "memory"); \
237} while (0)
238
239/*
240 * Save a segment register away
241 */
242#define savesegment(seg, value) \
243 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
244
245/*
246 * x86_32 user gs accessors.
247 */
248#ifdef CONFIG_X86_32
249#ifdef CONFIG_X86_32_LAZY_GS
250#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
251#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
252#define task_user_gs(tsk) ((tsk)->thread.gs)
253#define lazy_save_gs(v) savesegment(gs, (v))
254#define lazy_load_gs(v) loadsegment(gs, (v))
255#else /* X86_32_LAZY_GS */
256#define get_user_gs(regs) (u16)((regs)->gs)
257#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
258#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
259#define lazy_save_gs(v) do { } while (0)
260#define lazy_load_gs(v) do { } while (0)
261#endif /* X86_32_LAZY_GS */
262#endif /* X86_32 */
263
264static inline unsigned long get_limit(unsigned long segment)
265{
266 unsigned long __limit;
267 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
268 return __limit + 1;
269}
270
271#endif /* !__ASSEMBLY__ */
272#endif /* __KERNEL__ */
273 217
274#endif /* _ASM_X86_SEGMENT_H */ 218#endif /* _ASM_X86_SEGMENT_H */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index b7bf3505e1e..9756551ec76 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,8 +1,7 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H 2#define _ASM_X86_SETUP_H
3 3
4#include <uapi/asm/setup.h> 4#ifdef __KERNEL__
5
6 5
7#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
8 7
@@ -48,7 +47,7 @@ extern void reserve_standard_io_resources(void);
48extern void i386_reserve_resources(void); 47extern void i386_reserve_resources(void);
49extern void setup_default_timer_irq(void); 48extern void setup_default_timer_irq(void);
50 49
51#ifdef CONFIG_X86_INTEL_MID 50#ifdef CONFIG_X86_MRST
52extern void x86_mrst_early_setup(void); 51extern void x86_mrst_early_setup(void);
53#else 52#else
54static inline void x86_mrst_early_setup(void) { } 53static inline void x86_mrst_early_setup(void) { }
@@ -124,4 +123,6 @@ void __init x86_64_start_reservations(char *real_mode_data);
124 .size .brk.name,.-1b; \ 123 .size .brk.name,.-1b; \
125 .popsection 124 .popsection
126#endif /* __ASSEMBLY__ */ 125#endif /* __ASSEMBLY__ */
126#endif /* __KERNEL__ */
127
127#endif /* _ASM_X86_SETUP_H */ 128#endif /* _ASM_X86_SETUP_H */
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 9dfce4e0417..04459d25e66 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -1,9 +1,104 @@
1#ifndef _ASM_X86_SIGCONTEXT_H 1#ifndef _ASM_X86_SIGCONTEXT_H
2#define _ASM_X86_SIGCONTEXT_H 2#define _ASM_X86_SIGCONTEXT_H
3 3
4#include <uapi/asm/sigcontext.h> 4#include <linux/compiler.h>
5#include <linux/types.h>
6
7#define FP_XSTATE_MAGIC1 0x46505853U
8#define FP_XSTATE_MAGIC2 0x46505845U
9#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
10
11/*
12 * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
13 * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
14 * are used to extended the fpstate pointer in the sigcontext, which now
15 * includes the extended state information along with fpstate information.
16 *
17 * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
18 * area and FP_XSTATE_MAGIC2 at the end of memory layout
19 * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
20 * extended state information in the memory layout pointed by the fpstate
21 * pointer in sigcontext.
22 */
23struct _fpx_sw_bytes {
24 __u32 magic1; /* FP_XSTATE_MAGIC1 */
25 __u32 extended_size; /* total size of the layout referred by
26 * fpstate pointer in the sigcontext.
27 */
28 __u64 xstate_bv;
29 /* feature bit mask (including fp/sse/extended
30 * state) that is present in the memory
31 * layout.
32 */
33 __u32 xstate_size; /* actual xsave state size, based on the
34 * features saved in the layout.
35 * 'extended_size' will be greater than
36 * 'xstate_size'.
37 */
38 __u32 padding[7]; /* for future use. */
39};
5 40
6#ifdef __i386__ 41#ifdef __i386__
42/*
43 * As documented in the iBCS2 standard..
44 *
45 * The first part of "struct _fpstate" is just the normal i387
46 * hardware setup, the extra "status" word is used to save the
47 * coprocessor status word before entering the handler.
48 *
49 * Pentium III FXSR, SSE support
50 * Gareth Hughes <gareth@valinux.com>, May 2000
51 *
52 * The FPU state data structure has had to grow to accommodate the
53 * extended FPU state required by the Streaming SIMD Extensions.
54 * There is no documented standard to accomplish this at the moment.
55 */
56struct _fpreg {
57 unsigned short significand[4];
58 unsigned short exponent;
59};
60
61struct _fpxreg {
62 unsigned short significand[4];
63 unsigned short exponent;
64 unsigned short padding[3];
65};
66
67struct _xmmreg {
68 unsigned long element[4];
69};
70
71struct _fpstate {
72 /* Regular FPU environment */
73 unsigned long cw;
74 unsigned long sw;
75 unsigned long tag;
76 unsigned long ipoff;
77 unsigned long cssel;
78 unsigned long dataoff;
79 unsigned long datasel;
80 struct _fpreg _st[8];
81 unsigned short status;
82 unsigned short magic; /* 0xffff = regular FPU data only */
83
84 /* FXSR FPU environment */
85 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
86 unsigned long mxcsr;
87 unsigned long reserved;
88 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
89 struct _xmmreg _xmm[8];
90 unsigned long padding1[44];
91
92 union {
93 unsigned long padding2[12];
94 struct _fpx_sw_bytes sw_reserved; /* represents the extended
95 * state info */
96 };
97};
98
99#define X86_FXSR_MAGIC 0x0000
100
101#ifdef __KERNEL__
7struct sigcontext { 102struct sigcontext {
8 unsigned short gs, __gsh; 103 unsigned short gs, __gsh;
9 unsigned short fs, __fsh; 104 unsigned short fs, __fsh;
@@ -36,7 +131,62 @@ struct sigcontext {
36 unsigned long oldmask; 131 unsigned long oldmask;
37 unsigned long cr2; 132 unsigned long cr2;
38}; 133};
134#else /* __KERNEL__ */
135/*
136 * User-space might still rely on the old definition:
137 */
138struct sigcontext {
139 unsigned short gs, __gsh;
140 unsigned short fs, __fsh;
141 unsigned short es, __esh;
142 unsigned short ds, __dsh;
143 unsigned long edi;
144 unsigned long esi;
145 unsigned long ebp;
146 unsigned long esp;
147 unsigned long ebx;
148 unsigned long edx;
149 unsigned long ecx;
150 unsigned long eax;
151 unsigned long trapno;
152 unsigned long err;
153 unsigned long eip;
154 unsigned short cs, __csh;
155 unsigned long eflags;
156 unsigned long esp_at_signal;
157 unsigned short ss, __ssh;
158 struct _fpstate __user *fpstate;
159 unsigned long oldmask;
160 unsigned long cr2;
161};
162#endif /* !__KERNEL__ */
163
39#else /* __i386__ */ 164#else /* __i386__ */
165
166/* FXSAVE frame */
167/* Note: reserved1/2 may someday contain valuable data. Always save/restore
168 them when you change signal frames. */
169struct _fpstate {
170 __u16 cwd;
171 __u16 swd;
172 __u16 twd; /* Note this is not the same as the
173 32bit/x87/FSAVE twd */
174 __u16 fop;
175 __u64 rip;
176 __u64 rdp;
177 __u32 mxcsr;
178 __u32 mxcsr_mask;
179 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
180 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
181 __u32 reserved2[12];
182 union {
183 __u32 reserved3[12];
184 struct _fpx_sw_bytes sw_reserved; /* represents the extended
185 * state information */
186 };
187};
188
189#ifdef __KERNEL__
40struct sigcontext { 190struct sigcontext {
41 unsigned long r8; 191 unsigned long r8;
42 unsigned long r9; 192 unsigned long r9;
@@ -75,5 +225,66 @@ struct sigcontext {
75 void __user *fpstate; /* zero when no FPU/extended context */ 225 void __user *fpstate; /* zero when no FPU/extended context */
76 unsigned long reserved1[8]; 226 unsigned long reserved1[8];
77}; 227};
228#else /* __KERNEL__ */
229/*
230 * User-space might still rely on the old definition:
231 */
232struct sigcontext {
233 unsigned long r8;
234 unsigned long r9;
235 unsigned long r10;
236 unsigned long r11;
237 unsigned long r12;
238 unsigned long r13;
239 unsigned long r14;
240 unsigned long r15;
241 unsigned long rdi;
242 unsigned long rsi;
243 unsigned long rbp;
244 unsigned long rbx;
245 unsigned long rdx;
246 unsigned long rax;
247 unsigned long rcx;
248 unsigned long rsp;
249 unsigned long rip;
250 unsigned long eflags; /* RFLAGS */
251 unsigned short cs;
252 unsigned short gs;
253 unsigned short fs;
254 unsigned short __pad0;
255 unsigned long err;
256 unsigned long trapno;
257 unsigned long oldmask;
258 unsigned long cr2;
259 struct _fpstate __user *fpstate; /* zero when no FPU context */
260 unsigned long reserved1[8];
261};
262#endif /* !__KERNEL__ */
263
78#endif /* !__i386__ */ 264#endif /* !__i386__ */
265
266struct _xsave_hdr {
267 __u64 xstate_bv;
268 __u64 reserved1[2];
269 __u64 reserved2[5];
270};
271
272struct _ymmh_state {
273 /* 16 * 16 bytes for each YMMH-reg */
274 __u32 ymmh_space[64];
275};
276
277/*
278 * Extended state pointed by the fpstate pointer in the sigcontext.
279 * In addition to the fpstate, information encoded in the xstate_hdr
280 * indicates the presence of other extended state information
281 * supported by the processor and OS.
282 */
283struct _xstate {
284 struct _fpstate fpstate;
285 struct _xsave_hdr xstate_hdr;
286 struct _ymmh_state ymmh;
287 /* new processor state extensions go here */
288};
289
79#endif /* _ASM_X86_SIGCONTEXT_H */ 290#endif /* _ASM_X86_SIGCONTEXT_H */
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 7c7c27c97da..4e0fe26d27d 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,25 +59,12 @@ struct rt_sigframe_ia32 {
59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ 59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
60 60
61#ifdef CONFIG_X86_64 61#ifdef CONFIG_X86_64
62
63struct rt_sigframe { 62struct rt_sigframe {
64 char __user *pretcode; 63 char __user *pretcode;
65 struct ucontext uc; 64 struct ucontext uc;
66 struct siginfo info; 65 struct siginfo info;
67 /* fp state follows here */ 66 /* fp state follows here */
68}; 67};
69
70#ifdef CONFIG_X86_X32_ABI
71
72struct rt_sigframe_x32 {
73 u64 pretcode;
74 struct ucontext_x32 uc;
75 compat_siginfo_t info;
76 /* fp state follows here */
77};
78
79#endif /* CONFIG_X86_X32_ABI */
80
81#endif /* CONFIG_X86_64 */ 68#endif /* CONFIG_X86_64 */
82 69
83#endif /* _ASM_X86_SIGFRAME_H */ 70#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
deleted file mode 100644
index beff97f7df3..00000000000
--- a/arch/x86/include/asm/sighandling.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASM_X86_SIGHANDLING_H
2#define _ASM_X86_SIGHANDLING_H
3
4#include <linux/compiler.h>
5#include <linux/ptrace.h>
6#include <linux/signal.h>
7
8#include <asm/processor-flags.h>
9
10#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
11 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
12 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
13 X86_EFLAGS_CF)
14
15void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
16
17int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
18 unsigned long *pax);
19int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
20 struct pt_regs *regs, unsigned long mask);
21
22#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 216bf364a7e..598457cbd0f 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -2,6 +2,14 @@
2#define _ASM_X86_SIGNAL_H 2#define _ASM_X86_SIGNAL_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7#include <linux/compiler.h>
8
9/* Avoid too many header ordering problems. */
10struct siginfo;
11
12#ifdef __KERNEL__
5#include <linux/linkage.h> 13#include <linux/linkage.h>
6 14
7/* Most things should be clean enough to redefine this at will, if care 15/* Most things should be clean enough to redefine this at will, if care
@@ -23,15 +31,102 @@ typedef struct {
23 unsigned long sig[_NSIG_WORDS]; 31 unsigned long sig[_NSIG_WORDS];
24} sigset_t; 32} sigset_t;
25 33
26#ifndef CONFIG_COMPAT 34#else
27typedef sigset_t compat_sigset_t; 35/* Here we must cater to libcs that poke about in kernel headers. */
28#endif 36
37#define NSIG 32
38typedef unsigned long sigset_t;
29 39
40#endif /* __KERNEL__ */
30#endif /* __ASSEMBLY__ */ 41#endif /* __ASSEMBLY__ */
31#include <uapi/asm/signal.h> 42
43#define SIGHUP 1
44#define SIGINT 2
45#define SIGQUIT 3
46#define SIGILL 4
47#define SIGTRAP 5
48#define SIGABRT 6
49#define SIGIOT 6
50#define SIGBUS 7
51#define SIGFPE 8
52#define SIGKILL 9
53#define SIGUSR1 10
54#define SIGSEGV 11
55#define SIGUSR2 12
56#define SIGPIPE 13
57#define SIGALRM 14
58#define SIGTERM 15
59#define SIGSTKFLT 16
60#define SIGCHLD 17
61#define SIGCONT 18
62#define SIGSTOP 19
63#define SIGTSTP 20
64#define SIGTTIN 21
65#define SIGTTOU 22
66#define SIGURG 23
67#define SIGXCPU 24
68#define SIGXFSZ 25
69#define SIGVTALRM 26
70#define SIGPROF 27
71#define SIGWINCH 28
72#define SIGIO 29
73#define SIGPOLL SIGIO
74/*
75#define SIGLOST 29
76*/
77#define SIGPWR 30
78#define SIGSYS 31
79#define SIGUNUSED 31
80
81/* These should not be considered constants from userland. */
82#define SIGRTMIN 32
83#define SIGRTMAX _NSIG
84
85/*
86 * SA_FLAGS values:
87 *
88 * SA_ONSTACK indicates that a registered stack_t will be used.
89 * SA_RESTART flag to get restarting signals (which were the default long ago)
90 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
91 * SA_RESETHAND clears the handler when the signal is delivered.
92 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
93 * SA_NODEFER prevents the current signal from being masked in the handler.
94 *
95 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
96 * Unix names RESETHAND and NODEFER respectively.
97 */
98#define SA_NOCLDSTOP 0x00000001u
99#define SA_NOCLDWAIT 0x00000002u
100#define SA_SIGINFO 0x00000004u
101#define SA_ONSTACK 0x08000000u
102#define SA_RESTART 0x10000000u
103#define SA_NODEFER 0x40000000u
104#define SA_RESETHAND 0x80000000u
105
106#define SA_NOMASK SA_NODEFER
107#define SA_ONESHOT SA_RESETHAND
108
109#define SA_RESTORER 0x04000000
110
111/*
112 * sigaltstack controls
113 */
114#define SS_ONSTACK 1
115#define SS_DISABLE 2
116
117#define MINSIGSTKSZ 2048
118#define SIGSTKSZ 8192
119
120#include <asm-generic/signal-defs.h>
121
32#ifndef __ASSEMBLY__ 122#ifndef __ASSEMBLY__
123
124# ifdef __KERNEL__
33extern void do_notify_resume(struct pt_regs *, void *, __u32); 125extern void do_notify_resume(struct pt_regs *, void *, __u32);
126# endif /* __KERNEL__ */
127
34#ifdef __i386__ 128#ifdef __i386__
129# ifdef __KERNEL__
35struct old_sigaction { 130struct old_sigaction {
36 __sighandler_t sa_handler; 131 __sighandler_t sa_handler;
37 old_sigset_t sa_mask; 132 old_sigset_t sa_mask;
@@ -50,8 +145,45 @@ struct k_sigaction {
50 struct sigaction sa; 145 struct sigaction sa;
51}; 146};
52 147
148# else /* __KERNEL__ */
149/* Here we must cater to libcs that poke about in kernel headers. */
150
151struct sigaction {
152 union {
153 __sighandler_t _sa_handler;
154 void (*_sa_sigaction)(int, struct siginfo *, void *);
155 } _u;
156 sigset_t sa_mask;
157 unsigned long sa_flags;
158 void (*sa_restorer)(void);
159};
160
161#define sa_handler _u._sa_handler
162#define sa_sigaction _u._sa_sigaction
163
164# endif /* ! __KERNEL__ */
53#else /* __i386__ */ 165#else /* __i386__ */
166
167struct sigaction {
168 __sighandler_t sa_handler;
169 unsigned long sa_flags;
170 __sigrestore_t sa_restorer;
171 sigset_t sa_mask; /* mask last for extensibility */
172};
173
174struct k_sigaction {
175 struct sigaction sa;
176};
177
54#endif /* !__i386__ */ 178#endif /* !__i386__ */
179
180typedef struct sigaltstack {
181 void __user *ss_sp;
182 int ss_flags;
183 size_t ss_size;
184} stack_t;
185
186#ifdef __KERNEL__
55#include <asm/sigcontext.h> 187#include <asm/sigcontext.h>
56 188
57#ifdef __i386__ 189#ifdef __i386__
@@ -124,5 +256,9 @@ struct pt_regs;
124 256
125#endif /* !__i386__ */ 257#endif /* !__i386__ */
126 258
259#define ptrace_signal_deliver(regs, cookie) do { } while (0)
260
261#endif /* __KERNEL__ */
127#endif /* __ASSEMBLY__ */ 262#endif /* __ASSEMBLY__ */
263
128#endif /* _ASM_X86_SIGNAL_H */ 264#endif /* _ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
deleted file mode 100644
index 8d3120f4e27..00000000000
--- a/arch/x86/include/asm/smap.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Supervisor Mode Access Prevention support
3 *
4 * Copyright (C) 2012 Intel Corporation
5 * Author: H. Peter Anvin <hpa@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13#ifndef _ASM_X86_SMAP_H
14#define _ASM_X86_SMAP_H
15
16#include <linux/stringify.h>
17#include <asm/nops.h>
18#include <asm/cpufeature.h>
19
20/* "Raw" instruction opcodes */
21#define __ASM_CLAC .byte 0x0f,0x01,0xca
22#define __ASM_STAC .byte 0x0f,0x01,0xcb
23
24#ifdef __ASSEMBLY__
25
26#include <asm/alternative-asm.h>
27
28#ifdef CONFIG_X86_SMAP
29
30#define ASM_CLAC \
31 661: ASM_NOP3 ; \
32 .pushsection .altinstr_replacement, "ax" ; \
33 662: __ASM_CLAC ; \
34 .popsection ; \
35 .pushsection .altinstructions, "a" ; \
36 altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
37 .popsection
38
39#define ASM_STAC \
40 661: ASM_NOP3 ; \
41 .pushsection .altinstr_replacement, "ax" ; \
42 662: __ASM_STAC ; \
43 .popsection ; \
44 .pushsection .altinstructions, "a" ; \
45 altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
46 .popsection
47
48#else /* CONFIG_X86_SMAP */
49
50#define ASM_CLAC
51#define ASM_STAC
52
53#endif /* CONFIG_X86_SMAP */
54
55#else /* __ASSEMBLY__ */
56
57#include <asm/alternative.h>
58
59#ifdef CONFIG_X86_SMAP
60
61static __always_inline void clac(void)
62{
63 /* Note: a barrier is implicit in alternative() */
64 alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
65}
66
67static __always_inline void stac(void)
68{
69 /* Note: a barrier is implicit in alternative() */
70 alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
71}
72
73/* These macros can be used in asm() statements */
74#define ASM_CLAC \
75 ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
76#define ASM_STAC \
77 ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
78
79#else /* CONFIG_X86_SMAP */
80
81static inline void clac(void) { }
82static inline void stac(void) { }
83
84#define ASM_CLAC
85#define ASM_STAC
86
87#endif /* CONFIG_X86_SMAP */
88
89#endif /* __ASSEMBLY__ */
90
91#endif /* _ASM_X86_SMAP_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index b073aaea747..73b11bc0ae6 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -31,12 +31,12 @@ static inline bool cpu_has_ht_siblings(void)
31 return has_siblings; 31 return has_siblings;
32} 32}
33 33
34DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); 34DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
35DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); 35DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
36/* cpus sharing the last level cache: */ 36/* cpus sharing the last level cache: */
37DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); 37DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
38DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); 38DECLARE_PER_CPU(u16, cpu_llc_id);
39DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); 39DECLARE_PER_CPU(int, cpu_number);
40 40
41static inline struct cpumask *cpu_sibling_mask(int cpu) 41static inline struct cpumask *cpu_sibling_mask(int cpu)
42{ 42{
@@ -53,17 +53,15 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
53 return per_cpu(cpu_llc_shared_map, cpu); 53 return per_cpu(cpu_llc_shared_map, cpu);
54} 54}
55 55
56DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); 56DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
57DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid); 57DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
58#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 58#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
59DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid); 59DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
60#endif 60#endif
61 61
62/* Static state in head.S used to set up a CPU */ 62/* Static state in head.S used to set up a CPU */
63extern unsigned long stack_start; /* Initial stack pointer address */ 63extern unsigned long stack_start; /* Initial stack pointer address */
64 64
65struct task_struct;
66
67struct smp_ops { 65struct smp_ops {
68 void (*smp_prepare_boot_cpu)(void); 66 void (*smp_prepare_boot_cpu)(void);
69 void (*smp_prepare_cpus)(unsigned max_cpus); 67 void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -72,7 +70,7 @@ struct smp_ops {
72 void (*stop_other_cpus)(int wait); 70 void (*stop_other_cpus)(int wait);
73 void (*smp_send_reschedule)(int cpu); 71 void (*smp_send_reschedule)(int cpu);
74 72
75 int (*cpu_up)(unsigned cpu, struct task_struct *tidle); 73 int (*cpu_up)(unsigned cpu);
76 int (*cpu_disable)(void); 74 int (*cpu_disable)(void);
77 void (*cpu_die)(unsigned int cpu); 75 void (*cpu_die)(unsigned int cpu);
78 void (*play_dead)(void); 76 void (*play_dead)(void);
@@ -115,9 +113,9 @@ static inline void smp_cpus_done(unsigned int max_cpus)
115 smp_ops.smp_cpus_done(max_cpus); 113 smp_ops.smp_cpus_done(max_cpus);
116} 114}
117 115
118static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle) 116static inline int __cpu_up(unsigned int cpu)
119{ 117{
120 return smp_ops.cpu_up(cpu, tidle); 118 return smp_ops.cpu_up(cpu);
121} 119}
122 120
123static inline int __cpu_disable(void) 121static inline int __cpu_disable(void)
@@ -154,7 +152,7 @@ void cpu_disable_common(void);
154void native_smp_prepare_boot_cpu(void); 152void native_smp_prepare_boot_cpu(void);
155void native_smp_prepare_cpus(unsigned int max_cpus); 153void native_smp_prepare_cpus(unsigned int max_cpus);
156void native_smp_cpus_done(unsigned int max_cpus); 154void native_smp_cpus_done(unsigned int max_cpus);
157int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); 155int native_cpu_up(unsigned int cpunum);
158int native_cpu_disable(void); 156int native_cpu_disable(void);
159void native_cpu_die(unsigned int cpu); 157void native_cpu_die(unsigned int cpu);
160void native_play_dead(void); 158void native_play_dead(void);
@@ -164,12 +162,15 @@ int wbinvd_on_all_cpus(void);
164 162
165void native_send_call_func_ipi(const struct cpumask *mask); 163void native_send_call_func_ipi(const struct cpumask *mask);
166void native_send_call_func_single_ipi(int cpu); 164void native_send_call_func_single_ipi(int cpu);
167void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
168 165
169void smp_store_boot_cpu_info(void);
170void smp_store_cpu_info(int id); 166void smp_store_cpu_info(int id);
171#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 167#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
172 168
169/* We don't mark CPUs online until __cpu_up(), so we need another measure */
170static inline int num_booting_cpus(void)
171{
172 return cpumask_weight(cpu_callout_mask);
173}
173#else /* !CONFIG_SMP */ 174#else /* !CONFIG_SMP */
174#define wbinvd_on_cpu(cpu) wbinvd() 175#define wbinvd_on_cpu(cpu) wbinvd()
175static inline int wbinvd_on_all_cpus(void) 176static inline int wbinvd_on_all_cpus(void)
@@ -187,11 +188,11 @@ extern unsigned disabled_cpus __cpuinitdata;
187 * from the initial startup. We map APIC_BASE very early in page_setup(), 188 * from the initial startup. We map APIC_BASE very early in page_setup(),
188 * so this is correct in the x86 case. 189 * so this is correct in the x86 case.
189 */ 190 */
190#define raw_smp_processor_id() (this_cpu_read(cpu_number)) 191#define raw_smp_processor_id() (percpu_read(cpu_number))
191extern int safe_smp_processor_id(void); 192extern int safe_smp_processor_id(void);
192 193
193#elif defined(CONFIG_X86_64_SMP) 194#elif defined(CONFIG_X86_64_SMP)
194#define raw_smp_processor_id() (this_cpu_read(cpu_number)) 195#define raw_smp_processor_id() (percpu_read(cpu_number))
195 196
196#define stack_smp_processor_id() \ 197#define stack_smp_processor_id() \
197({ \ 198({ \
@@ -224,11 +225,5 @@ extern int hard_smp_processor_id(void);
224 225
225#endif /* CONFIG_X86_LOCAL_APIC */ 226#endif /* CONFIG_X86_LOCAL_APIC */
226 227
227#ifdef CONFIG_DEBUG_NMI_SELFTEST
228extern void nmi_selftest(void);
229#else
230#define nmi_selftest() do { } while (0)
231#endif
232
233#endif /* __ASSEMBLY__ */ 228#endif /* __ASSEMBLY__ */
234#endif /* _ASM_X86_SMP_H */ 229#endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
deleted file mode 100644
index 41fc93a2e22..00000000000
--- a/arch/x86/include/asm/special_insns.h
+++ /dev/null
@@ -1,199 +0,0 @@
1#ifndef _ASM_X86_SPECIAL_INSNS_H
2#define _ASM_X86_SPECIAL_INSNS_H
3
4
5#ifdef __KERNEL__
6
7static inline void native_clts(void)
8{
9 asm volatile("clts");
10}
11
12/*
13 * Volatile isn't enough to prevent the compiler from reordering the
14 * read/write functions for the control registers and messing everything up.
15 * A memory clobber would solve the problem, but would prevent reordering of
16 * all loads stores around it, which can hurt performance. Solution is to
17 * use a variable and mimic reads and writes to it to enforce serialization
18 */
19static unsigned long __force_order;
20
21static inline unsigned long native_read_cr0(void)
22{
23 unsigned long val;
24 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
25 return val;
26}
27
28static inline void native_write_cr0(unsigned long val)
29{
30 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
31}
32
33static inline unsigned long native_read_cr2(void)
34{
35 unsigned long val;
36 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
37 return val;
38}
39
40static inline void native_write_cr2(unsigned long val)
41{
42 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
43}
44
45static inline unsigned long native_read_cr3(void)
46{
47 unsigned long val;
48 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
49 return val;
50}
51
52static inline void native_write_cr3(unsigned long val)
53{
54 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
55}
56
57static inline unsigned long native_read_cr4(void)
58{
59 unsigned long val;
60 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
61 return val;
62}
63
64static inline unsigned long native_read_cr4_safe(void)
65{
66 unsigned long val;
67 /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
68 * exists, so it will never fail. */
69#ifdef CONFIG_X86_32
70 asm volatile("1: mov %%cr4, %0\n"
71 "2:\n"
72 _ASM_EXTABLE(1b, 2b)
73 : "=r" (val), "=m" (__force_order) : "0" (0));
74#else
75 val = native_read_cr4();
76#endif
77 return val;
78}
79
80static inline void native_write_cr4(unsigned long val)
81{
82 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
83}
84
85#ifdef CONFIG_X86_64
86static inline unsigned long native_read_cr8(void)
87{
88 unsigned long cr8;
89 asm volatile("movq %%cr8,%0" : "=r" (cr8));
90 return cr8;
91}
92
93static inline void native_write_cr8(unsigned long val)
94{
95 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
96}
97#endif
98
99static inline void native_wbinvd(void)
100{
101 asm volatile("wbinvd": : :"memory");
102}
103
104extern void native_load_gs_index(unsigned);
105
106#ifdef CONFIG_PARAVIRT
107#include <asm/paravirt.h>
108#else
109
110static inline unsigned long read_cr0(void)
111{
112 return native_read_cr0();
113}
114
115static inline void write_cr0(unsigned long x)
116{
117 native_write_cr0(x);
118}
119
120static inline unsigned long read_cr2(void)
121{
122 return native_read_cr2();
123}
124
125static inline void write_cr2(unsigned long x)
126{
127 native_write_cr2(x);
128}
129
130static inline unsigned long read_cr3(void)
131{
132 return native_read_cr3();
133}
134
135static inline void write_cr3(unsigned long x)
136{
137 native_write_cr3(x);
138}
139
140static inline unsigned long read_cr4(void)
141{
142 return native_read_cr4();
143}
144
145static inline unsigned long read_cr4_safe(void)
146{
147 return native_read_cr4_safe();
148}
149
150static inline void write_cr4(unsigned long x)
151{
152 native_write_cr4(x);
153}
154
155static inline void wbinvd(void)
156{
157 native_wbinvd();
158}
159
160#ifdef CONFIG_X86_64
161
162static inline unsigned long read_cr8(void)
163{
164 return native_read_cr8();
165}
166
167static inline void write_cr8(unsigned long x)
168{
169 native_write_cr8(x);
170}
171
172static inline void load_gs_index(unsigned selector)
173{
174 native_load_gs_index(selector);
175}
176
177#endif
178
179/* Clear the 'TS' bit */
180static inline void clts(void)
181{
182 native_clts();
183}
184
185#endif/* CONFIG_PARAVIRT */
186
187#define stts() write_cr0(read_cr0() | X86_CR0_TS)
188
189static inline void clflush(volatile void *__p)
190{
191 asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
192}
193
194#define nop() asm volatile ("nop")
195
196
197#endif /* __KERNEL__ */
198
199#endif /* _ASM_X86_SPECIAL_INSNS_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab..ee67edf86fd 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,15 +12,18 @@
12 * Simple spin lock operations. There are two variants, one clears IRQ's 12 * Simple spin lock operations. There are two variants, one clears IRQ's
13 * on the local processor, one does not. 13 * on the local processor, one does not.
14 * 14 *
15 * These are fair FIFO ticket locks, which support up to 2^16 CPUs. 15 * These are fair FIFO ticket locks, which are currently limited to 256
16 * CPUs.
16 * 17 *
17 * (the type definitions are in asm/spinlock_types.h) 18 * (the type definitions are in asm/spinlock_types.h)
18 */ 19 */
19 20
20#ifdef CONFIG_X86_32 21#ifdef CONFIG_X86_32
21# define LOCK_PTR_REG "a" 22# define LOCK_PTR_REG "a"
23# define REG_PTR_MODE "k"
22#else 24#else
23# define LOCK_PTR_REG "D" 25# define LOCK_PTR_REG "D"
26# define REG_PTR_MODE "q"
24#endif 27#endif
25 28
26#if defined(CONFIG_X86_32) && \ 29#if defined(CONFIG_X86_32) && \
@@ -46,53 +49,126 @@
46 * issues and should be optimal for the uncontended case. Note the tail must be 49 * issues and should be optimal for the uncontended case. Note the tail must be
47 * in the high part, because a wide xadd increment of the low part would carry 50 * in the high part, because a wide xadd increment of the low part would carry
48 * up and contaminate the high part. 51 * up and contaminate the high part.
52 *
53 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
54 * save some instructions and make the code more elegant. There really isn't
55 * much between them in performance though, especially as locks are out of line.
49 */ 56 */
57#if (NR_CPUS < 256)
58#define TICKET_SHIFT 8
59
50static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) 60static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
51{ 61{
52 register struct __raw_tickets inc = { .tail = 1 }; 62 short inc = 0x0100;
53 63
54 inc = xadd(&lock->tickets, inc); 64 asm volatile (
55 65 LOCK_PREFIX "xaddw %w0, %1\n"
56 for (;;) { 66 "1:\t"
57 if (inc.head == inc.tail) 67 "cmpb %h0, %b0\n\t"
58 break; 68 "je 2f\n\t"
59 cpu_relax(); 69 "rep ; nop\n\t"
60 inc.head = ACCESS_ONCE(lock->tickets.head); 70 "movb %1, %b0\n\t"
61 } 71 /* don't need lfence here, because loads are in-order */
62 barrier(); /* make sure nothing creeps before the lock is taken */ 72 "jmp 1b\n"
73 "2:"
74 : "+Q" (inc), "+m" (lock->slock)
75 :
76 : "memory", "cc");
63} 77}
64 78
65static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 79static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
66{ 80{
67 arch_spinlock_t old, new; 81 int tmp, new;
82
83 asm volatile("movzwl %2, %0\n\t"
84 "cmpb %h0,%b0\n\t"
85 "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
86 "jne 1f\n\t"
87 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
88 "1:"
89 "sete %b1\n\t"
90 "movzbl %b1,%0\n\t"
91 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
92 :
93 : "memory", "cc");
94
95 return tmp;
96}
68 97
69 old.tickets = ACCESS_ONCE(lock->tickets); 98static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
70 if (old.tickets.head != old.tickets.tail) 99{
71 return 0; 100 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
101 : "+m" (lock->slock)
102 :
103 : "memory", "cc");
104}
105#else
106#define TICKET_SHIFT 16
72 107
73 new.head_tail = old.head_tail + (1 << TICKET_SHIFT); 108static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
109{
110 int inc = 0x00010000;
111 int tmp;
112
113 asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
114 "movzwl %w0, %2\n\t"
115 "shrl $16, %0\n\t"
116 "1:\t"
117 "cmpl %0, %2\n\t"
118 "je 2f\n\t"
119 "rep ; nop\n\t"
120 "movzwl %1, %2\n\t"
121 /* don't need lfence here, because loads are in-order */
122 "jmp 1b\n"
123 "2:"
124 : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
125 :
126 : "memory", "cc");
127}
74 128
75 /* cmpxchg is a full barrier, so nothing can move before it */ 129static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
76 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; 130{
131 int tmp;
132 int new;
133
134 asm volatile("movl %2,%0\n\t"
135 "movl %0,%1\n\t"
136 "roll $16, %0\n\t"
137 "cmpl %0,%1\n\t"
138 "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
139 "jne 1f\n\t"
140 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
141 "1:"
142 "sete %b1\n\t"
143 "movzbl %b1,%0\n\t"
144 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
145 :
146 : "memory", "cc");
147
148 return tmp;
77} 149}
78 150
79static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 151static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
80{ 152{
81 __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX); 153 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
154 : "+m" (lock->slock)
155 :
156 : "memory", "cc");
82} 157}
158#endif
83 159
84static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) 160static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
85{ 161{
86 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 162 int tmp = ACCESS_ONCE(lock->slock);
87 163
88 return tmp.tail != tmp.head; 164 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
89} 165}
90 166
91static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) 167static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
92{ 168{
93 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 169 int tmp = ACCESS_ONCE(lock->slock);
94 170
95 return (__ticket_t)(tmp.tail - tmp.head) > 1; 171 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
96} 172}
97 173
98#ifndef CONFIG_PARAVIRT_SPINLOCKS 174#ifndef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index ad0ad07fc00..7c7a486fcb6 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,28 +5,11 @@
5# error "please don't include this file directly" 5# error "please don't include this file directly"
6#endif 6#endif
7 7
8#include <linux/types.h>
9
10#if (CONFIG_NR_CPUS < 256)
11typedef u8 __ticket_t;
12typedef u16 __ticketpair_t;
13#else
14typedef u16 __ticket_t;
15typedef u32 __ticketpair_t;
16#endif
17
18#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
19
20typedef struct arch_spinlock { 8typedef struct arch_spinlock {
21 union { 9 unsigned int slock;
22 __ticketpair_t head_tail;
23 struct __raw_tickets {
24 __ticket_t head, tail;
25 } tickets;
26 };
27} arch_spinlock_t; 10} arch_spinlock_t;
28 11
29#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
30 13
31#include <asm/rwlock.h> 14#include <asm/rwlock.h>
32 15
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
deleted file mode 100644
index e9d32df89cc..00000000000
--- a/arch/x86/include/asm/sta2x11.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
3 */
4#ifndef __ASM_STA2X11_H
5#define __ASM_STA2X11_H
6
7#include <linux/pci.h>
8
9/* This needs to be called from the MFD to configure its sub-devices */
10struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
11
12#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 6a998598f17..15751776356 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -38,6 +38,7 @@
38#include <asm/tsc.h> 38#include <asm/tsc.h>
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/percpu.h> 40#include <asm/percpu.h>
41#include <asm/system.h>
41#include <asm/desc.h> 42#include <asm/desc.h>
42#include <linux/random.h> 43#include <linux/random.h>
43 44
@@ -75,9 +76,9 @@ static __always_inline void boot_init_stack_canary(void)
75 76
76 current->stack_canary = canary; 77 current->stack_canary = canary;
77#ifdef CONFIG_X86_64 78#ifdef CONFIG_X86_64
78 this_cpu_write(irq_stack_union.stack_canary, canary); 79 percpu_write(irq_stack_union.stack_canary, canary);
79#else 80#else
80 this_cpu_write(stack_canary.canary, canary); 81 percpu_write(stack_canary.canary, canary);
81#endif 82#endif
82} 83}
83 84
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h
index 09224d7a586..6dfd6d9373a 100644
--- a/arch/x86/include/asm/string.h
+++ b/arch/x86/include/asm/string.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/string_32.h> 2# include "string_32.h"
3#else 3#else
4# include <asm/string_64.h> 4# include "string_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h
index 2fab6c2c357..9bd521fe457 100644
--- a/arch/x86/include/asm/suspend.h
+++ b/arch/x86/include/asm/suspend.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include <asm/suspend_32.h> 2# include "suspend_32.h"
3#else 3#else
4# include <asm/suspend_64.h> 4# include "suspend_64.h"
5#endif 5#endif
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 6136d99f537..f2b83bc7d78 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -1,9 +1,6 @@
1#ifndef __SVM_H 1#ifndef __SVM_H
2#define __SVM_H 2#define __SVM_H
3 3
4#include <uapi/asm/svm.h>
5
6
7enum { 4enum {
8 INTERCEPT_INTR, 5 INTERCEPT_INTR,
9 INTERCEPT_NMI, 6 INTERCEPT_NMI,
@@ -267,6 +264,81 @@ struct __attribute__ ((__packed__)) vmcb {
267 264
268#define SVM_EXITINFO_REG_MASK 0x0F 265#define SVM_EXITINFO_REG_MASK 0x0F
269 266
267#define SVM_EXIT_READ_CR0 0x000
268#define SVM_EXIT_READ_CR3 0x003
269#define SVM_EXIT_READ_CR4 0x004
270#define SVM_EXIT_READ_CR8 0x008
271#define SVM_EXIT_WRITE_CR0 0x010
272#define SVM_EXIT_WRITE_CR3 0x013
273#define SVM_EXIT_WRITE_CR4 0x014
274#define SVM_EXIT_WRITE_CR8 0x018
275#define SVM_EXIT_READ_DR0 0x020
276#define SVM_EXIT_READ_DR1 0x021
277#define SVM_EXIT_READ_DR2 0x022
278#define SVM_EXIT_READ_DR3 0x023
279#define SVM_EXIT_READ_DR4 0x024
280#define SVM_EXIT_READ_DR5 0x025
281#define SVM_EXIT_READ_DR6 0x026
282#define SVM_EXIT_READ_DR7 0x027
283#define SVM_EXIT_WRITE_DR0 0x030
284#define SVM_EXIT_WRITE_DR1 0x031
285#define SVM_EXIT_WRITE_DR2 0x032
286#define SVM_EXIT_WRITE_DR3 0x033
287#define SVM_EXIT_WRITE_DR4 0x034
288#define SVM_EXIT_WRITE_DR5 0x035
289#define SVM_EXIT_WRITE_DR6 0x036
290#define SVM_EXIT_WRITE_DR7 0x037
291#define SVM_EXIT_EXCP_BASE 0x040
292#define SVM_EXIT_INTR 0x060
293#define SVM_EXIT_NMI 0x061
294#define SVM_EXIT_SMI 0x062
295#define SVM_EXIT_INIT 0x063
296#define SVM_EXIT_VINTR 0x064
297#define SVM_EXIT_CR0_SEL_WRITE 0x065
298#define SVM_EXIT_IDTR_READ 0x066
299#define SVM_EXIT_GDTR_READ 0x067
300#define SVM_EXIT_LDTR_READ 0x068
301#define SVM_EXIT_TR_READ 0x069
302#define SVM_EXIT_IDTR_WRITE 0x06a
303#define SVM_EXIT_GDTR_WRITE 0x06b
304#define SVM_EXIT_LDTR_WRITE 0x06c
305#define SVM_EXIT_TR_WRITE 0x06d
306#define SVM_EXIT_RDTSC 0x06e
307#define SVM_EXIT_RDPMC 0x06f
308#define SVM_EXIT_PUSHF 0x070
309#define SVM_EXIT_POPF 0x071
310#define SVM_EXIT_CPUID 0x072
311#define SVM_EXIT_RSM 0x073
312#define SVM_EXIT_IRET 0x074
313#define SVM_EXIT_SWINT 0x075
314#define SVM_EXIT_INVD 0x076
315#define SVM_EXIT_PAUSE 0x077
316#define SVM_EXIT_HLT 0x078
317#define SVM_EXIT_INVLPG 0x079
318#define SVM_EXIT_INVLPGA 0x07a
319#define SVM_EXIT_IOIO 0x07b
320#define SVM_EXIT_MSR 0x07c
321#define SVM_EXIT_TASK_SWITCH 0x07d
322#define SVM_EXIT_FERR_FREEZE 0x07e
323#define SVM_EXIT_SHUTDOWN 0x07f
324#define SVM_EXIT_VMRUN 0x080
325#define SVM_EXIT_VMMCALL 0x081
326#define SVM_EXIT_VMLOAD 0x082
327#define SVM_EXIT_VMSAVE 0x083
328#define SVM_EXIT_STGI 0x084
329#define SVM_EXIT_CLGI 0x085
330#define SVM_EXIT_SKINIT 0x086
331#define SVM_EXIT_RDTSCP 0x087
332#define SVM_EXIT_ICEBP 0x088
333#define SVM_EXIT_WBINVD 0x089
334#define SVM_EXIT_MONITOR 0x08a
335#define SVM_EXIT_MWAIT 0x08b
336#define SVM_EXIT_MWAIT_COND 0x08c
337#define SVM_EXIT_XSETBV 0x08d
338#define SVM_EXIT_NPF 0x400
339
340#define SVM_EXIT_ERR -1
341
270#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) 342#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
271 343
272#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" 344#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
@@ -277,3 +349,4 @@ struct __attribute__ ((__packed__)) vmcb {
277#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" 349#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
278 350
279#endif 351#endif
352
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
deleted file mode 100644
index 4ec45b3abba..00000000000
--- a/arch/x86/include/asm/switch_to.h
+++ /dev/null
@@ -1,129 +0,0 @@
1#ifndef _ASM_X86_SWITCH_TO_H
2#define _ASM_X86_SWITCH_TO_H
3
4struct task_struct; /* one of the stranger aspects of C forward declarations */
5struct task_struct *__switch_to(struct task_struct *prev,
6 struct task_struct *next);
7struct tss_struct;
8void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
9 struct tss_struct *tss);
10
11#ifdef CONFIG_X86_32
12
13#ifdef CONFIG_CC_STACKPROTECTOR
14#define __switch_canary \
15 "movl %P[task_canary](%[next]), %%ebx\n\t" \
16 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
17#define __switch_canary_oparam \
18 , [stack_canary] "=m" (stack_canary.canary)
19#define __switch_canary_iparam \
20 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
21#else /* CC_STACKPROTECTOR */
22#define __switch_canary
23#define __switch_canary_oparam
24#define __switch_canary_iparam
25#endif /* CC_STACKPROTECTOR */
26
27/*
28 * Saving eflags is important. It switches not only IOPL between tasks,
29 * it also protects other tasks from NT leaking through sysenter etc.
30 */
31#define switch_to(prev, next, last) \
32do { \
33 /* \
34 * Context-switching clobbers all registers, so we clobber \
35 * them explicitly, via unused output variables. \
36 * (EAX and EBP is not listed because EBP is saved/restored \
37 * explicitly for wchan access and EAX is the return value of \
38 * __switch_to()) \
39 */ \
40 unsigned long ebx, ecx, edx, esi, edi; \
41 \
42 asm volatile("pushfl\n\t" /* save flags */ \
43 "pushl %%ebp\n\t" /* save EBP */ \
44 "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
45 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
46 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
47 "pushl %[next_ip]\n\t" /* restore EIP */ \
48 __switch_canary \
49 "jmp __switch_to\n" /* regparm call */ \
50 "1:\t" \
51 "popl %%ebp\n\t" /* restore EBP */ \
52 "popfl\n" /* restore flags */ \
53 \
54 /* output parameters */ \
55 : [prev_sp] "=m" (prev->thread.sp), \
56 [prev_ip] "=m" (prev->thread.ip), \
57 "=a" (last), \
58 \
59 /* clobbered output registers: */ \
60 "=b" (ebx), "=c" (ecx), "=d" (edx), \
61 "=S" (esi), "=D" (edi) \
62 \
63 __switch_canary_oparam \
64 \
65 /* input parameters: */ \
66 : [next_sp] "m" (next->thread.sp), \
67 [next_ip] "m" (next->thread.ip), \
68 \
69 /* regparm parameters for __switch_to(): */ \
70 [prev] "a" (prev), \
71 [next] "d" (next) \
72 \
73 __switch_canary_iparam \
74 \
75 : /* reloaded segment registers */ \
76 "memory"); \
77} while (0)
78
79#else /* CONFIG_X86_32 */
80
81/* frame pointer must be last for get_wchan */
82#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
84
85#define __EXTRA_CLOBBER \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15"
88
89#ifdef CONFIG_CC_STACKPROTECTOR
90#define __switch_canary \
91 "movq %P[task_canary](%%rsi),%%r8\n\t" \
92 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
93#define __switch_canary_oparam \
94 , [gs_canary] "=m" (irq_stack_union.stack_canary)
95#define __switch_canary_iparam \
96 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
97#else /* CC_STACKPROTECTOR */
98#define __switch_canary
99#define __switch_canary_oparam
100#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */
102
103/* Save restore flags to clear handle leaking NT */
104#define switch_to(prev, next, last) \
105 asm volatile(SAVE_CONTEXT \
106 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
107 "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
108 "call __switch_to\n\t" \
109 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
110 __switch_canary \
111 "movq %P[thread_info](%%rsi),%%r8\n\t" \
112 "movq %%rax,%%rdi\n\t" \
113 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
114 "jnz ret_from_fork\n\t" \
115 RESTORE_CONTEXT \
116 : "=a" (last) \
117 __switch_canary_oparam \
118 : [next] "S" (next), [prev] "D" (prev), \
119 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
120 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
121 [_tif_fork] "i" (_TIF_FORK), \
122 [thread_info] "i" (offsetof(struct task_struct, stack)), \
123 [current_task] "m" (current_task) \
124 __switch_canary_iparam \
125 : "memory", "cc" __EXTRA_CLOBBER)
126
127#endif /* CONFIG_X86_32 */
128
129#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 31f61f96e0f..cb238526a9f 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,8 +10,6 @@
10#ifndef _ASM_X86_SYS_IA32_H 10#ifndef _ASM_X86_SYS_IA32_H
11#define _ASM_X86_SYS_IA32_H 11#define _ASM_X86_SYS_IA32_H
12 12
13#ifdef CONFIG_COMPAT
14
15#include <linux/compiler.h> 13#include <linux/compiler.h>
16#include <linux/linkage.h> 14#include <linux/linkage.h>
17#include <linux/types.h> 15#include <linux/types.h>
@@ -38,9 +36,11 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
38 struct sigaction32 __user *, unsigned int); 36 struct sigaction32 __user *, unsigned int);
39asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, 37asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
40 struct old_sigaction32 __user *); 38 struct old_sigaction32 __user *);
39asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
40 compat_sigset_t __user *, unsigned int);
41asmlinkage long sys32_alarm(unsigned int); 41asmlinkage long sys32_alarm(unsigned int);
42 42
43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); 43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
44asmlinkage long sys32_sysfs(int, u32, u32); 44asmlinkage long sys32_sysfs(int, u32, u32);
45 45
46asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, 46asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
@@ -54,6 +54,10 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
54asmlinkage long sys32_personality(unsigned long); 54asmlinkage long sys32_personality(unsigned long);
55asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); 55asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
56 56
57asmlinkage long sys32_execve(const char __user *, compat_uptr_t __user *,
58 compat_uptr_t __user *, struct pt_regs *);
59asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
60
57long sys32_lseek(unsigned int, int, unsigned int); 61long sys32_lseek(unsigned int, int, unsigned int);
58long sys32_kill(int, int); 62long sys32_kill(int, int);
59long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); 63long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
@@ -69,6 +73,8 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
69 73
70/* ia32/ia32_signal.c */ 74/* ia32/ia32_signal.c */
71asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); 75asmlinkage long sys32_sigsuspend(int, int, old_sigset_t);
76asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *,
77 stack_ia32_t __user *, struct pt_regs *);
72asmlinkage long sys32_sigreturn(struct pt_regs *); 78asmlinkage long sys32_sigreturn(struct pt_regs *);
73asmlinkage long sys32_rt_sigreturn(struct pt_regs *); 79asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
74 80
@@ -77,7 +83,4 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
77 83
78asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, 84asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
79 const char __user *); 85 const char __user *);
80
81#endif /* CONFIG_COMPAT */
82
83#endif /* _ASM_X86_SYS_IA32_H */ 86#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 1ace47b6259..c4a348f7bd4 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,12 +13,8 @@
13#ifndef _ASM_X86_SYSCALL_H 13#ifndef _ASM_X86_SYSCALL_H
14#define _ASM_X86_SYSCALL_H 14#define _ASM_X86_SYSCALL_H
15 15
16#include <linux/audit.h>
17#include <linux/sched.h> 16#include <linux/sched.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <asm/asm-offsets.h> /* For NR_syscalls */
20#include <asm/thread_info.h> /* for TS_COMPAT */
21#include <asm/unistd.h>
22 18
23extern const unsigned long sys_call_table[]; 19extern const unsigned long sys_call_table[];
24 20
@@ -29,13 +25,13 @@ extern const unsigned long sys_call_table[];
29 */ 25 */
30static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 26static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
31{ 27{
32 return regs->orig_ax & __SYSCALL_MASK; 28 return regs->orig_ax;
33} 29}
34 30
35static inline void syscall_rollback(struct task_struct *task, 31static inline void syscall_rollback(struct task_struct *task,
36 struct pt_regs *regs) 32 struct pt_regs *regs)
37{ 33{
38 regs->ax = regs->orig_ax & __SYSCALL_MASK; 34 regs->ax = regs->orig_ax;
39} 35}
40 36
41static inline long syscall_get_error(struct task_struct *task, 37static inline long syscall_get_error(struct task_struct *task,
@@ -90,12 +86,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
90 memcpy(&regs->bx + i, args, n * sizeof(args[0])); 86 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
91} 87}
92 88
93static inline int syscall_get_arch(struct task_struct *task,
94 struct pt_regs *regs)
95{
96 return AUDIT_ARCH_I386;
97}
98
99#else /* CONFIG_X86_64 */ 89#else /* CONFIG_X86_64 */
100 90
101static inline void syscall_get_arguments(struct task_struct *task, 91static inline void syscall_get_arguments(struct task_struct *task,
@@ -220,25 +210,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
220 } 210 }
221} 211}
222 212
223static inline int syscall_get_arch(struct task_struct *task,
224 struct pt_regs *regs)
225{
226#ifdef CONFIG_IA32_EMULATION
227 /*
228 * TS_COMPAT is set for 32-bit syscall entry and then
229 * remains set until we return to user mode.
230 *
231 * TIF_IA32 tasks should always have TS_COMPAT set at
232 * system call time.
233 *
234 * x32 tasks should be considered AUDIT_ARCH_X86_64.
235 */
236 if (task_thread_info(task)->status & TS_COMPAT)
237 return AUDIT_ARCH_I386;
238#endif
239 /* Both x32 and x86_64 are considered "64-bit". */
240 return AUDIT_ARCH_X86_64;
241}
242#endif /* CONFIG_X86_32 */ 213#endif /* CONFIG_X86_32 */
243 214
244#endif /* _ASM_X86_SYSCALL_H */ 215#endif /* _ASM_X86_SYSCALL_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 58b7e3eac0a..f1d8b441fc7 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -20,11 +20,23 @@
20asmlinkage long sys_ioperm(unsigned long, unsigned long, int); 20asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
21long sys_iopl(unsigned int, struct pt_regs *); 21long sys_iopl(unsigned int, struct pt_regs *);
22 22
23/* kernel/process.c */
24int sys_fork(struct pt_regs *);
25int sys_vfork(struct pt_regs *);
26long sys_execve(const char __user *,
27 const char __user *const __user *,
28 const char __user *const __user *, struct pt_regs *);
29long sys_clone(unsigned long, unsigned long, void __user *,
30 void __user *, struct pt_regs *);
31
23/* kernel/ldt.c */ 32/* kernel/ldt.c */
24asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); 33asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
25 34
26/* kernel/signal.c */ 35/* kernel/signal.c */
27long sys_rt_sigreturn(struct pt_regs *); 36long sys_rt_sigreturn(struct pt_regs *);
37long sys_sigaltstack(const stack_t __user *, stack_t __user *,
38 struct pt_regs *);
39
28 40
29/* kernel/tls.c */ 41/* kernel/tls.c */
30asmlinkage int sys_set_thread_area(struct user_desc __user *); 42asmlinkage int sys_set_thread_area(struct user_desc __user *);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d946e63ee8..a1fe5c127b5 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,8 +40,7 @@ struct thread_info {
40 */ 40 */
41 __u8 supervisor_stack[0]; 41 __u8 supervisor_stack[0];
42#endif 42#endif
43 unsigned int sig_on_uaccess_error:1; 43 int uaccess_err;
44 unsigned int uaccess_err:1; /* uaccess failed */
45}; 44};
46 45
47#define INIT_THREAD_INFO(tsk) \ 46#define INIT_THREAD_INFO(tsk) \
@@ -79,60 +78,55 @@ struct thread_info {
79#define TIF_SIGPENDING 2 /* signal pending */ 78#define TIF_SIGPENDING 2 /* signal pending */
80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 79#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 80#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
81#define TIF_IRET 5 /* force IRET */
82#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 82#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
84#define TIF_SECCOMP 8 /* secure computing */ 84#define TIF_SECCOMP 8 /* secure computing */
85#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 85#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
86#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 86#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
87#define TIF_UPROBE 12 /* breakpointed or singlestepping */
88#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 87#define TIF_NOTSC 16 /* TSC is not accessible in userland */
89#define TIF_IA32 17 /* IA32 compatibility process */ 88#define TIF_IA32 17 /* 32bit process */
90#define TIF_FORK 18 /* ret_from_fork */ 89#define TIF_FORK 18 /* ret_from_fork */
91#define TIF_NOHZ 19 /* in adaptive nohz mode */
92#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 90#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
93#define TIF_DEBUG 21 /* uses debug registers */ 91#define TIF_DEBUG 21 /* uses debug registers */
94#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 92#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
93#define TIF_FREEZE 23 /* is freezing for suspend */
95#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ 94#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
96#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
97#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
98#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ 97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
99#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
100#define TIF_X32 30 /* 32-bit native x86-64 binary */
101 98
102#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
103#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
104#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 101#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
105#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 102#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
106#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 103#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
104#define _TIF_IRET (1 << TIF_IRET)
107#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 105#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
108#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 106#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
109#define _TIF_SECCOMP (1 << TIF_SECCOMP) 107#define _TIF_SECCOMP (1 << TIF_SECCOMP)
110#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) 108#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
111#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) 109#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
112#define _TIF_UPROBE (1 << TIF_UPROBE)
113#define _TIF_NOTSC (1 << TIF_NOTSC) 110#define _TIF_NOTSC (1 << TIF_NOTSC)
114#define _TIF_IA32 (1 << TIF_IA32) 111#define _TIF_IA32 (1 << TIF_IA32)
115#define _TIF_FORK (1 << TIF_FORK) 112#define _TIF_FORK (1 << TIF_FORK)
116#define _TIF_NOHZ (1 << TIF_NOHZ)
117#define _TIF_DEBUG (1 << TIF_DEBUG) 113#define _TIF_DEBUG (1 << TIF_DEBUG)
118#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) 114#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
115#define _TIF_FREEZE (1 << TIF_FREEZE)
119#define _TIF_FORCED_TF (1 << TIF_FORCED_TF) 116#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
120#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 117#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
121#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 118#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
122#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 119#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
123#define _TIF_ADDR32 (1 << TIF_ADDR32)
124#define _TIF_X32 (1 << TIF_X32)
125 120
126/* work to do in syscall_trace_enter() */ 121/* work to do in syscall_trace_enter() */
127#define _TIF_WORK_SYSCALL_ENTRY \ 122#define _TIF_WORK_SYSCALL_ENTRY \
128 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ 123 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
129 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ 124 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
130 _TIF_NOHZ)
131 125
132/* work to do in syscall_trace_leave() */ 126/* work to do in syscall_trace_leave() */
133#define _TIF_WORK_SYSCALL_EXIT \ 127#define _TIF_WORK_SYSCALL_EXIT \
134 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ 128 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
135 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) 129 _TIF_SYSCALL_TRACEPOINT)
136 130
137/* work to do on interrupt/exception return */ 131/* work to do on interrupt/exception return */
138#define _TIF_WORK_MASK \ 132#define _TIF_WORK_MASK \
@@ -142,8 +136,7 @@ struct thread_info {
142 136
143/* work to do on any return to user space */ 137/* work to do on any return to user space */
144#define _TIF_ALLWORK_MASK \ 138#define _TIF_ALLWORK_MASK \
145 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ 139 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
146 _TIF_NOHZ)
147 140
148/* Only used for 64 bit */ 141/* Only used for 64 bit */
149#define _TIF_DO_NOTIFY_MASK \ 142#define _TIF_DO_NOTIFY_MASK \
@@ -159,6 +152,24 @@ struct thread_info {
159 152
160#define PREEMPT_ACTIVE 0x10000000 153#define PREEMPT_ACTIVE 0x10000000
161 154
155/* thread information allocation */
156#ifdef CONFIG_DEBUG_STACK_USAGE
157#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
158#else
159#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
160#endif
161
162#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
163
164#define alloc_thread_info_node(tsk, node) \
165({ \
166 struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
167 THREAD_ORDER); \
168 struct thread_info *ret = page ? page_address(page) : NULL; \
169 \
170 ret; \
171})
172
162#ifdef CONFIG_X86_32 173#ifdef CONFIG_X86_32
163 174
164#define STACK_WARN (THREAD_SIZE/8) 175#define STACK_WARN (THREAD_SIZE/8)
@@ -208,7 +219,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
208static inline struct thread_info *current_thread_info(void) 219static inline struct thread_info *current_thread_info(void)
209{ 220{
210 struct thread_info *ti; 221 struct thread_info *ti;
211 ti = (void *)(this_cpu_read_stable(kernel_stack) + 222 ti = (void *)(percpu_read_stable(kernel_stack) +
212 KERNEL_STACK_OFFSET - THREAD_SIZE); 223 KERNEL_STACK_OFFSET - THREAD_SIZE);
213 return ti; 224 return ti;
214} 225}
@@ -220,12 +231,6 @@ static inline struct thread_info *current_thread_info(void)
220 movq PER_CPU_VAR(kernel_stack),reg ; \ 231 movq PER_CPU_VAR(kernel_stack),reg ; \
221 subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg 232 subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
222 233
223/*
224 * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
225 * a certain register (to be used in assembler memory operands).
226 */
227#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
228
229#endif 234#endif
230 235
231#endif /* !X86_32 */ 236#endif /* !X86_32 */
@@ -237,6 +242,8 @@ static inline struct thread_info *current_thread_info(void)
237 * ever touches our thread-synchronous status, so we don't 242 * ever touches our thread-synchronous status, so we don't
238 * have to worry about atomic accesses. 243 * have to worry about atomic accesses.
239 */ 244 */
245#define TS_USEDFPU 0x0001 /* FPU was used by this task
246 this quantum (SMP) */
240#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 247#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
241#define TS_POLLING 0x0004 /* idle task polling need_resched, 248#define TS_POLLING 0x0004 /* idle task polling need_resched,
242 skip sending interrupt */ 249 skip sending interrupt */
@@ -250,41 +257,14 @@ static inline void set_restore_sigmask(void)
250{ 257{
251 struct thread_info *ti = current_thread_info(); 258 struct thread_info *ti = current_thread_info();
252 ti->status |= TS_RESTORE_SIGMASK; 259 ti->status |= TS_RESTORE_SIGMASK;
253 WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); 260 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
254}
255static inline void clear_restore_sigmask(void)
256{
257 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
258}
259static inline bool test_restore_sigmask(void)
260{
261 return current_thread_info()->status & TS_RESTORE_SIGMASK;
262}
263static inline bool test_and_clear_restore_sigmask(void)
264{
265 struct thread_info *ti = current_thread_info();
266 if (!(ti->status & TS_RESTORE_SIGMASK))
267 return false;
268 ti->status &= ~TS_RESTORE_SIGMASK;
269 return true;
270}
271
272static inline bool is_ia32_task(void)
273{
274#ifdef CONFIG_X86_32
275 return true;
276#endif
277#ifdef CONFIG_IA32_EMULATION
278 if (current_thread_info()->status & TS_COMPAT)
279 return true;
280#endif
281 return false;
282} 261}
283#endif /* !__ASSEMBLY__ */ 262#endif /* !__ASSEMBLY__ */
284 263
285#ifndef __ASSEMBLY__ 264#ifndef __ASSEMBLY__
286extern void arch_task_cache_init(void); 265extern void arch_task_cache_init(void);
266extern void free_thread_info(struct thread_info *ti);
287extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 267extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
288extern void arch_release_task_struct(struct task_struct *tsk); 268#define arch_task_cache_init arch_task_cache_init
289#endif 269#endif
290#endif /* _ASM_X86_THREAD_INFO_H */ 270#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 34baa0eb5d0..431793e5d48 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -57,10 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
57 57
58static inline unsigned long long __cycles_2_ns(unsigned long long cyc) 58static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
59{ 59{
60 unsigned long long quot;
61 unsigned long long rem;
60 int cpu = smp_processor_id(); 62 int cpu = smp_processor_id();
61 unsigned long long ns = per_cpu(cyc2ns_offset, cpu); 63 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
62 ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), 64 quot = (cyc >> CYC2NS_SCALE_FACTOR);
63 (1UL << CYC2NS_SCALE_FACTOR)); 65 rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
66 ns += quot * per_cpu(cyc2ns, cpu) +
67 ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
64 return ns; 68 return ns;
65} 69}
66 70
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4fef20773b8..829215fef9e 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,14 +4,7 @@
4#define tlb_start_vma(tlb, vma) do { } while (0) 4#define tlb_start_vma(tlb, vma) do { } while (0)
5#define tlb_end_vma(tlb, vma) do { } while (0) 5#define tlb_end_vma(tlb, vma) do { } while (0)
6#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 6#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
7 7#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
8#define tlb_flush(tlb) \
9{ \
10 if (tlb->fullmm == 0) \
11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
12 else \
13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
14}
15 8
16#include <asm-generic/tlb.h> 9#include <asm-generic/tlb.h>
17 10
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0fee48e279c..169be8938b9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <asm/special_insns.h> 8#include <asm/system.h>
9 9
10#ifdef CONFIG_PARAVIRT 10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h> 11#include <asm/paravirt.h>
@@ -56,10 +56,17 @@ static inline void __flush_tlb_all(void)
56 56
57static inline void __flush_tlb_one(unsigned long addr) 57static inline void __flush_tlb_one(unsigned long addr)
58{ 58{
59 if (cpu_has_invlpg)
59 __flush_tlb_single(addr); 60 __flush_tlb_single(addr);
61 else
62 __flush_tlb();
60} 63}
61 64
62#define TLB_FLUSH_ALL -1UL 65#ifdef CONFIG_X86_32
66# define TLB_FLUSH_ALL 0xffffffff
67#else
68# define TLB_FLUSH_ALL -1ULL
69#endif
63 70
64/* 71/*
65 * TLB flushing: 72 * TLB flushing:
@@ -70,10 +77,14 @@ static inline void __flush_tlb_one(unsigned long addr)
70 * - flush_tlb_page(vma, vmaddr) flushes one page 77 * - flush_tlb_page(vma, vmaddr) flushes one page
71 * - flush_tlb_range(vma, start, end) flushes a range of pages 78 * - flush_tlb_range(vma, start, end) flushes a range of pages
72 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 79 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
73 * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus 80 * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
74 * 81 *
75 * ..but the i386 has somewhat limited tlb flushing capabilities, 82 * ..but the i386 has somewhat limited tlb flushing capabilities,
76 * and page-granular flushes are available only on i486 and up. 83 * and page-granular flushes are available only on i486 and up.
84 *
85 * x86-64 can only flush individual pages or full VMs. For a range flush
86 * we always do the full VM. Might be worth trying if for a small
87 * range a few INVLPGs in a row are a win.
77 */ 88 */
78 89
79#ifndef CONFIG_SMP 90#ifndef CONFIG_SMP
@@ -102,17 +113,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
102 __flush_tlb(); 113 __flush_tlb();
103} 114}
104 115
105static inline void flush_tlb_mm_range(struct mm_struct *mm,
106 unsigned long start, unsigned long end, unsigned long vmflag)
107{
108 if (mm == current->active_mm)
109 __flush_tlb();
110}
111
112static inline void native_flush_tlb_others(const struct cpumask *cpumask, 116static inline void native_flush_tlb_others(const struct cpumask *cpumask,
113 struct mm_struct *mm, 117 struct mm_struct *mm,
114 unsigned long start, 118 unsigned long va)
115 unsigned long end)
116{ 119{
117} 120}
118 121
@@ -120,35 +123,27 @@ static inline void reset_lazy_tlbstate(void)
120{ 123{
121} 124}
122 125
123static inline void flush_tlb_kernel_range(unsigned long start,
124 unsigned long end)
125{
126 flush_tlb_all();
127}
128
129#else /* SMP */ 126#else /* SMP */
130 127
131#include <asm/smp.h> 128#include <asm/smp.h>
132 129
133#define local_flush_tlb() __flush_tlb() 130#define local_flush_tlb() __flush_tlb()
134 131
135#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
136
137#define flush_tlb_range(vma, start, end) \
138 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
139
140extern void flush_tlb_all(void); 132extern void flush_tlb_all(void);
141extern void flush_tlb_current_task(void); 133extern void flush_tlb_current_task(void);
134extern void flush_tlb_mm(struct mm_struct *);
142extern void flush_tlb_page(struct vm_area_struct *, unsigned long); 135extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
143extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
144 unsigned long end, unsigned long vmflag);
145extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
146 136
147#define flush_tlb() flush_tlb_current_task() 137#define flush_tlb() flush_tlb_current_task()
148 138
139static inline void flush_tlb_range(struct vm_area_struct *vma,
140 unsigned long start, unsigned long end)
141{
142 flush_tlb_mm(vma->vm_mm);
143}
144
149void native_flush_tlb_others(const struct cpumask *cpumask, 145void native_flush_tlb_others(const struct cpumask *cpumask,
150 struct mm_struct *mm, 146 struct mm_struct *mm, unsigned long va);
151 unsigned long start, unsigned long end);
152 147
153#define TLBSTATE_OK 1 148#define TLBSTATE_OK 1
154#define TLBSTATE_LAZY 2 149#define TLBSTATE_LAZY 2
@@ -161,15 +156,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
161 156
162static inline void reset_lazy_tlbstate(void) 157static inline void reset_lazy_tlbstate(void)
163{ 158{
164 this_cpu_write(cpu_tlbstate.state, 0); 159 percpu_write(cpu_tlbstate.state, 0);
165 this_cpu_write(cpu_tlbstate.active_mm, &init_mm); 160 percpu_write(cpu_tlbstate.active_mm, &init_mm);
166} 161}
167 162
168#endif /* SMP */ 163#endif /* SMP */
169 164
170#ifndef CONFIG_PARAVIRT 165#ifndef CONFIG_PARAVIRT
171#define flush_tlb_others(mask, mm, start, end) \ 166#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
172 native_flush_tlb_others(mask, mm, start, end)
173#endif 167#endif
174 168
169static inline void flush_tlb_kernel_range(unsigned long start,
170 unsigned long end)
171{
172 flush_tlb_all();
173}
174
175#endif /* _ASM_X86_TLBFLUSH_H */ 175#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 095b21507b6..c00692476e9 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -92,8 +92,48 @@ extern void setup_node_to_cpumask_map(void);
92 92
93#define pcibus_to_node(bus) __pcibus_to_node(bus) 93#define pcibus_to_node(bus) __pcibus_to_node(bus)
94 94
95#ifdef CONFIG_X86_32
96# define SD_CACHE_NICE_TRIES 1
97# define SD_IDLE_IDX 1
98#else
99# define SD_CACHE_NICE_TRIES 2
100# define SD_IDLE_IDX 2
101#endif
102
103/* sched_domains SD_NODE_INIT for NUMA machines */
104#define SD_NODE_INIT (struct sched_domain) { \
105 .min_interval = 8, \
106 .max_interval = 32, \
107 .busy_factor = 32, \
108 .imbalance_pct = 125, \
109 .cache_nice_tries = SD_CACHE_NICE_TRIES, \
110 .busy_idx = 3, \
111 .idle_idx = SD_IDLE_IDX, \
112 .newidle_idx = 0, \
113 .wake_idx = 0, \
114 .forkexec_idx = 0, \
115 \
116 .flags = 1*SD_LOAD_BALANCE \
117 | 1*SD_BALANCE_NEWIDLE \
118 | 1*SD_BALANCE_EXEC \
119 | 1*SD_BALANCE_FORK \
120 | 0*SD_BALANCE_WAKE \
121 | 1*SD_WAKE_AFFINE \
122 | 0*SD_PREFER_LOCAL \
123 | 0*SD_SHARE_CPUPOWER \
124 | 0*SD_POWERSAVINGS_BALANCE \
125 | 0*SD_SHARE_PKG_RESOURCES \
126 | 1*SD_SERIALIZE \
127 | 0*SD_PREFER_SIBLING \
128 , \
129 .last_balance = jiffies, \
130 .balance_interval = 1, \
131}
132
133#ifdef CONFIG_X86_64
95extern int __node_distance(int, int); 134extern int __node_distance(int, int);
96#define node_distance(a, b) __node_distance(a, b) 135#define node_distance(a, b) __node_distance(a, b)
136#endif
97 137
98#else /* !CONFIG_NUMA */ 138#else /* !CONFIG_NUMA */
99 139
@@ -134,7 +174,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
134} 174}
135 175
136struct pci_bus; 176struct pci_bus;
137void x86_pci_root_bus_resources(int bus, struct list_head *resources); 177void x86_pci_root_bus_res_quirks(struct pci_bus *b);
138 178
139#ifdef CONFIG_SMP 179#ifdef CONFIG_SMP
140#define mc_capable() ((boot_cpu_data.x86_max_cores > 1) && \ 180#define mc_capable() ((boot_cpu_data.x86_max_cores > 1) && \
diff --git a/arch/x86/include/asm/trace_clock.h b/arch/x86/include/asm/trace_clock.h
deleted file mode 100644
index beab86cc282..00000000000
--- a/arch/x86/include/asm/trace_clock.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_X86_TRACE_CLOCK_H
2#define _ASM_X86_TRACE_CLOCK_H
3
4#include <linux/compiler.h>
5#include <linux/types.h>
6
7#ifdef CONFIG_X86_TSC
8
9extern u64 notrace trace_clock_x86_tsc(void);
10
11# define ARCH_TRACE_CLOCKS \
12 { trace_clock_x86_tsc, "x86-tsc", .in_ns = 0 },
13
14#else /* !CONFIG_X86_TSC */
15
16#define ARCH_TRACE_CLOCKS
17
18#endif
19
20#endif /* _ASM_X86_TRACE_CLOCK_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 88eae2aec61..0012d0902c5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -89,29 +89,4 @@ asmlinkage void smp_thermal_interrupt(void);
89asmlinkage void mce_threshold_interrupt(void); 89asmlinkage void mce_threshold_interrupt(void);
90#endif 90#endif
91 91
92/* Interrupts/Exceptions */
93enum {
94 X86_TRAP_DE = 0, /* 0, Divide-by-zero */
95 X86_TRAP_DB, /* 1, Debug */
96 X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
97 X86_TRAP_BP, /* 3, Breakpoint */
98 X86_TRAP_OF, /* 4, Overflow */
99 X86_TRAP_BR, /* 5, Bound Range Exceeded */
100 X86_TRAP_UD, /* 6, Invalid Opcode */
101 X86_TRAP_NM, /* 7, Device Not Available */
102 X86_TRAP_DF, /* 8, Double Fault */
103 X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
104 X86_TRAP_TS, /* 10, Invalid TSS */
105 X86_TRAP_NP, /* 11, Segment Not Present */
106 X86_TRAP_SS, /* 12, Stack Segment Fault */
107 X86_TRAP_GP, /* 13, General Protection Fault */
108 X86_TRAP_PF, /* 14, Page Fault */
109 X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
110 X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
111 X86_TRAP_AC, /* 17, Alignment Check */
112 X86_TRAP_MC, /* 18, Machine Check */
113 X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
114 X86_TRAP_IRET = 32, /* 32, IRET Exception */
115};
116
117#endif /* _ASM_X86_TRAPS_H */ 92#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c91e8b9d588..83e2efd181e 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,8 +51,6 @@ extern int unsynchronized_tsc(void);
51extern int check_tsc_unstable(void); 51extern int check_tsc_unstable(void);
52extern unsigned long native_calibrate_tsc(void); 52extern unsigned long native_calibrate_tsc(void);
53 53
54extern int tsc_clocksource_reliable;
55
56/* 54/*
57 * Boot-time check whether the TSCs are synchronized across 55 * Boot-time check whether the TSCs are synchronized across
58 * all CPUs/cores: 56 * all CPUs/cores:
@@ -61,7 +59,7 @@ extern void check_tsc_sync_source(int cpu);
61extern void check_tsc_sync_target(void); 59extern void check_tsc_sync_target(void);
62 60
63extern int notsc_setup(char *); 61extern int notsc_setup(char *);
64extern void tsc_save_sched_clock_state(void); 62extern void save_sched_clock_state(void);
65extern void tsc_restore_sched_clock_state(void); 63extern void restore_sched_clock_state(void);
66 64
67#endif /* _ASM_X86_TSC_H */ 65#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1709801d18e..36361bf6fdd 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,7 +9,6 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <asm/asm.h> 10#include <asm/asm.h>
11#include <asm/page.h> 11#include <asm/page.h>
12#include <asm/smap.h>
13 12
14#define VERIFY_READ 0 13#define VERIFY_READ 0
15#define VERIFY_WRITE 1 14#define VERIFY_WRITE 1
@@ -33,9 +32,9 @@
33 32
34#define segment_eq(a, b) ((a).seg == (b).seg) 33#define segment_eq(a, b) ((a).seg == (b).seg)
35 34
36#define user_addr_max() (current_thread_info()->addr_limit.seg) 35#define __addr_ok(addr) \
37#define __addr_ok(addr) \ 36 ((unsigned long __force)(addr) < \
38 ((unsigned long __force)(addr) < user_addr_max()) 37 (current_thread_info()->addr_limit.seg))
39 38
40/* 39/*
41 * Test whether a block of memory is a valid user space address. 40 * Test whether a block of memory is a valid user space address.
@@ -47,14 +46,14 @@
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 46 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */ 47 */
49 48
50#define __range_not_ok(addr, size, limit) \ 49#define __range_not_ok(addr, size) \
51({ \ 50({ \
52 unsigned long flag, roksum; \ 51 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \ 52 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ 53 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \ 54 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \ 55 : "1" (addr), "g" ((long)(size)), \
57 "rm" (limit)); \ 56 "rm" (current_thread_info()->addr_limit.seg)); \
58 flag; \ 57 flag; \
59}) 58})
60 59
@@ -77,16 +76,14 @@
77 * checks that the pointer is in the user space range - after calling 76 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT. 77 * this function, memory access functions may still return -EFAULT.
79 */ 78 */
80#define access_ok(type, addr, size) \ 79#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
81 (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
82 80
83/* 81/*
84 * The exception table consists of pairs of addresses relative to the 82 * The exception table consists of pairs of addresses: the first is the
85 * exception table enty itself: the first is the address of an 83 * address of an instruction that is allowed to fault, and the second is
86 * instruction that is allowed to fault, and the second is the address 84 * the address at which the program should continue. No registers are
87 * at which the program should continue. No registers are modified, 85 * modified, so it is entirely up to the continuation code to figure out
88 * so it is entirely up to the continuation code to figure out what to 86 * what to do.
89 * do.
90 * 87 *
91 * All the routines below use bits of fixup code that are out of line 88 * All the routines below use bits of fixup code that are out of line
92 * with the main instruction path. This means when everything is well, 89 * with the main instruction path. This means when everything is well,
@@ -95,14 +92,10 @@
95 */ 92 */
96 93
97struct exception_table_entry { 94struct exception_table_entry {
98 int insn, fixup; 95 unsigned long insn, fixup;
99}; 96};
100/* This is not the generic standard exception_table_entry format */
101#define ARCH_HAS_SORT_EXTABLE
102#define ARCH_HAS_SEARCH_EXTABLE
103 97
104extern int fixup_exception(struct pt_regs *regs); 98extern int fixup_exception(struct pt_regs *regs);
105extern int early_fixup_exception(unsigned long *ip);
106 99
107/* 100/*
108 * These are the main single-value transfer routines. They automatically 101 * These are the main single-value transfer routines. They automatically
@@ -193,10 +186,9 @@ extern int __get_user_bad(void);
193 186
194#ifdef CONFIG_X86_32 187#ifdef CONFIG_X86_32
195#define __put_user_asm_u64(x, addr, err, errret) \ 188#define __put_user_asm_u64(x, addr, err, errret) \
196 asm volatile(ASM_STAC "\n" \ 189 asm volatile("1: movl %%eax,0(%2)\n" \
197 "1: movl %%eax,0(%2)\n" \
198 "2: movl %%edx,4(%2)\n" \ 190 "2: movl %%edx,4(%2)\n" \
199 "3: " ASM_CLAC "\n" \ 191 "3:\n" \
200 ".section .fixup,\"ax\"\n" \ 192 ".section .fixup,\"ax\"\n" \
201 "4: movl %3,%0\n" \ 193 "4: movl %3,%0\n" \
202 " jmp 3b\n" \ 194 " jmp 3b\n" \
@@ -207,12 +199,11 @@ extern int __get_user_bad(void);
207 : "A" (x), "r" (addr), "i" (errret), "0" (err)) 199 : "A" (x), "r" (addr), "i" (errret), "0" (err))
208 200
209#define __put_user_asm_ex_u64(x, addr) \ 201#define __put_user_asm_ex_u64(x, addr) \
210 asm volatile(ASM_STAC "\n" \ 202 asm volatile("1: movl %%eax,0(%1)\n" \
211 "1: movl %%eax,0(%1)\n" \
212 "2: movl %%edx,4(%1)\n" \ 203 "2: movl %%edx,4(%1)\n" \
213 "3: " ASM_CLAC "\n" \ 204 "3:\n" \
214 _ASM_EXTABLE_EX(1b, 2b) \ 205 _ASM_EXTABLE(1b, 2b - 1b) \
215 _ASM_EXTABLE_EX(2b, 3b) \ 206 _ASM_EXTABLE(2b, 3b - 2b) \
216 : : "A" (x), "r" (addr)) 207 : : "A" (x), "r" (addr))
217 208
218#define __put_user_x8(x, ptr, __ret_pu) \ 209#define __put_user_x8(x, ptr, __ret_pu) \
@@ -237,6 +228,8 @@ extern void __put_user_2(void);
237extern void __put_user_4(void); 228extern void __put_user_4(void);
238extern void __put_user_8(void); 229extern void __put_user_8(void);
239 230
231#ifdef CONFIG_X86_WP_WORKS_OK
232
240/** 233/**
241 * put_user: - Write a simple value into user space. 234 * put_user: - Write a simple value into user space.
242 * @x: Value to copy to user space. 235 * @x: Value to copy to user space.
@@ -324,6 +317,29 @@ do { \
324 } \ 317 } \
325} while (0) 318} while (0)
326 319
320#else
321
322#define __put_user_size(x, ptr, size, retval, errret) \
323do { \
324 __typeof__(*(ptr))__pus_tmp = x; \
325 retval = 0; \
326 \
327 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
328 retval = errret; \
329} while (0)
330
331#define put_user(x, ptr) \
332({ \
333 int __ret_pu; \
334 __typeof__(*(ptr))__pus_tmp = x; \
335 __ret_pu = 0; \
336 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
337 sizeof(*(ptr))) != 0)) \
338 __ret_pu = -EFAULT; \
339 __ret_pu; \
340})
341#endif
342
327#ifdef CONFIG_X86_32 343#ifdef CONFIG_X86_32
328#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() 344#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
329#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad() 345#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
@@ -357,9 +373,8 @@ do { \
357} while (0) 373} while (0)
358 374
359#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 375#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
360 asm volatile(ASM_STAC "\n" \ 376 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
361 "1: mov"itype" %2,%"rtype"1\n" \ 377 "2:\n" \
362 "2: " ASM_CLAC "\n" \
363 ".section .fixup,\"ax\"\n" \ 378 ".section .fixup,\"ax\"\n" \
364 "3: mov %3,%0\n" \ 379 "3: mov %3,%0\n" \
365 " xor"itype" %"rtype"1,%"rtype"1\n" \ 380 " xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -393,7 +408,7 @@ do { \
393#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ 408#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
394 asm volatile("1: mov"itype" %1,%"rtype"0\n" \ 409 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
395 "2:\n" \ 410 "2:\n" \
396 _ASM_EXTABLE_EX(1b, 2b) \ 411 _ASM_EXTABLE(1b, 2b - 1b) \
397 : ltype(x) : "m" (__m(addr))) 412 : ltype(x) : "m" (__m(addr)))
398 413
399#define __put_user_nocheck(x, ptr, size) \ 414#define __put_user_nocheck(x, ptr, size) \
@@ -422,9 +437,8 @@ struct __large_struct { unsigned long buf[100]; };
422 * aliasing issues. 437 * aliasing issues.
423 */ 438 */
424#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 439#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
425 asm volatile(ASM_STAC "\n" \ 440 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
426 "1: mov"itype" %"rtype"1,%2\n" \ 441 "2:\n" \
427 "2: " ASM_CLAC "\n" \
428 ".section .fixup,\"ax\"\n" \ 442 ".section .fixup,\"ax\"\n" \
429 "3: mov %3,%0\n" \ 443 "3: mov %3,%0\n" \
430 " jmp 2b\n" \ 444 " jmp 2b\n" \
@@ -436,20 +450,20 @@ struct __large_struct { unsigned long buf[100]; };
436#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ 450#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
437 asm volatile("1: mov"itype" %"rtype"0,%1\n" \ 451 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
438 "2:\n" \ 452 "2:\n" \
439 _ASM_EXTABLE_EX(1b, 2b) \ 453 _ASM_EXTABLE(1b, 2b - 1b) \
440 : : ltype(x), "m" (__m(addr))) 454 : : ltype(x), "m" (__m(addr)))
441 455
442/* 456/*
443 * uaccess_try and catch 457 * uaccess_try and catch
444 */ 458 */
445#define uaccess_try do { \ 459#define uaccess_try do { \
460 int prev_err = current_thread_info()->uaccess_err; \
446 current_thread_info()->uaccess_err = 0; \ 461 current_thread_info()->uaccess_err = 0; \
447 stac(); \
448 barrier(); 462 barrier();
449 463
450#define uaccess_catch(err) \ 464#define uaccess_catch(err) \
451 clac(); \ 465 (err) |= current_thread_info()->uaccess_err; \
452 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ 466 current_thread_info()->uaccess_err = prev_err; \
453} while (0) 467} while (0)
454 468
455/** 469/**
@@ -518,22 +532,31 @@ struct __large_struct { unsigned long buf[100]; };
518 (x) = (__force __typeof__(*(ptr)))__gue_val; \ 532 (x) = (__force __typeof__(*(ptr)))__gue_val; \
519} while (0) 533} while (0)
520 534
535#ifdef CONFIG_X86_WP_WORKS_OK
536
521#define put_user_try uaccess_try 537#define put_user_try uaccess_try
522#define put_user_catch(err) uaccess_catch(err) 538#define put_user_catch(err) uaccess_catch(err)
523 539
524#define put_user_ex(x, ptr) \ 540#define put_user_ex(x, ptr) \
525 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 541 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
526 542
527extern unsigned long 543#else /* !CONFIG_X86_WP_WORKS_OK */
528copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
529extern __must_check long
530strncpy_from_user(char *dst, const char __user *src, long count);
531 544
532extern __must_check long strlen_user(const char __user *str); 545#define put_user_try do { \
533extern __must_check long strnlen_user(const char __user *str, long n); 546 int __uaccess_err = 0;
534 547
535unsigned long __must_check clear_user(void __user *mem, unsigned long len); 548#define put_user_catch(err) \
536unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 549 (err) |= __uaccess_err; \
550} while (0)
551
552#define put_user_ex(x, ptr) do { \
553 __uaccess_err |= __put_user(x, ptr); \
554} while (0)
555
556#endif /* CONFIG_X86_WP_WORKS_OK */
557
558extern unsigned long
559copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
537 560
538/* 561/*
539 * movsl can be slow when source and dest are not both 8-byte aligned 562 * movsl can be slow when source and dest are not both 8-byte aligned
@@ -547,9 +570,9 @@ extern struct movsl_mask {
547#define ARCH_HAS_NOCACHE_UACCESS 1 570#define ARCH_HAS_NOCACHE_UACCESS 1
548 571
549#ifdef CONFIG_X86_32 572#ifdef CONFIG_X86_32
550# include <asm/uaccess_32.h> 573# include "uaccess_32.h"
551#else 574#else
552# include <asm/uaccess_64.h> 575# include "uaccess_64.h"
553#endif 576#endif
554 577
555#endif /* _ASM_X86_UACCESS_H */ 578#endif /* _ASM_X86_UACCESS_H */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 7f760a9f1f6..566e803cc60 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,4 +213,29 @@ static inline unsigned long __must_check copy_from_user(void *to,
213 return n; 213 return n;
214} 214}
215 215
216long __must_check strncpy_from_user(char *dst, const char __user *src,
217 long count);
218long __must_check __strncpy_from_user(char *dst,
219 const char __user *src, long count);
220
221/**
222 * strlen_user: - Get the size of a string in user space.
223 * @str: The string to measure.
224 *
225 * Context: User context only. This function may sleep.
226 *
227 * Get the size of a NUL-terminated string in user space.
228 *
229 * Returns the size of the string INCLUDING the terminating NUL.
230 * On exception, returns 0.
231 *
232 * If there is a limit on the length of a valid string, you may wish to
233 * consider using strnlen_user() instead.
234 */
235#define strlen_user(str) strnlen_user(str, LONG_MAX)
236
237long strnlen_user(const char __user *str, long n);
238unsigned long __must_check clear_user(void __user *mem, unsigned long len);
239unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
240
216#endif /* _ASM_X86_UACCESS_32_H */ 241#endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c457d..1c66d30971a 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -17,8 +17,6 @@
17 17
18/* Handles exceptions in both to and from, but doesn't do access_ok */ 18/* Handles exceptions in both to and from, but doesn't do access_ok */
19__must_check unsigned long 19__must_check unsigned long
20copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
21__must_check unsigned long
22copy_user_generic_string(void *to, const void *from, unsigned len); 20copy_user_generic_string(void *to, const void *from, unsigned len);
23__must_check unsigned long 21__must_check unsigned long
24copy_user_generic_unrolled(void *to, const void *from, unsigned len); 22copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -28,16 +26,9 @@ copy_user_generic(void *to, const void *from, unsigned len)
28{ 26{
29 unsigned ret; 27 unsigned ret;
30 28
31 /* 29 alternative_call(copy_user_generic_unrolled,
32 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
33 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
34 * Otherwise, use copy_user_generic_unrolled.
35 */
36 alternative_call_2(copy_user_generic_unrolled,
37 copy_user_generic_string, 30 copy_user_generic_string,
38 X86_FEATURE_REP_GOOD, 31 X86_FEATURE_REP_GOOD,
39 copy_user_enhanced_fast_string,
40 X86_FEATURE_ERMS,
41 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), 32 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
42 "=d" (len)), 33 "=d" (len)),
43 "1" (to), "2" (from), "3" (len) 34 "1" (to), "2" (from), "3" (len)
@@ -217,6 +208,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
217 } 208 }
218} 209}
219 210
211__must_check long
212strncpy_from_user(char *dst, const char __user *src, long count);
213__must_check long
214__strncpy_from_user(char *dst, const char __user *src, long count);
215__must_check long strnlen_user(const char __user *str, long n);
216__must_check long __strnlen_user(const char __user *str, long n);
217__must_check long strlen_user(const char __user *str);
218__must_check unsigned long clear_user(void __user *mem, unsigned long len);
219__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
220
220static __must_check __always_inline int 221static __must_check __always_inline int
221__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) 222__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
222{ 223{
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index a0790e07ba6..2a58ed3e51d 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,63 +1,13 @@
1#ifndef _ASM_X86_UNISTD_H 1#ifdef __KERNEL__
2#define _ASM_X86_UNISTD_H 1 2# ifdef CONFIG_X86_32
3 3# include "unistd_32.h"
4#include <uapi/asm/unistd.h>
5
6
7# ifdef CONFIG_X86_X32_ABI
8# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
9# else 4# else
10# define __SYSCALL_MASK (~0) 5# include "unistd_64.h"
11# endif 6# endif
12 7#else
13# ifdef CONFIG_X86_32 8# ifdef __i386__
14 9# include "unistd_32.h"
15# include <asm/unistd_32.h>
16# define __ARCH_WANT_STAT64
17# define __ARCH_WANT_SYS_IPC
18# define __ARCH_WANT_SYS_OLD_MMAP
19# define __ARCH_WANT_SYS_OLD_SELECT
20
21# else 10# else
22 11# include "unistd_64.h"
23# include <asm/unistd_64.h>
24# include <asm/unistd_64_x32.h>
25# define __ARCH_WANT_COMPAT_SYS_TIME
26
27# endif 12# endif
28 13#endif
29# define __ARCH_WANT_OLD_READDIR
30# define __ARCH_WANT_OLD_STAT
31# define __ARCH_WANT_SYS_ALARM
32# define __ARCH_WANT_SYS_FADVISE64
33# define __ARCH_WANT_SYS_GETHOSTNAME
34# define __ARCH_WANT_SYS_GETPGRP
35# define __ARCH_WANT_SYS_LLSEEK
36# define __ARCH_WANT_SYS_NICE
37# define __ARCH_WANT_SYS_OLDUMOUNT
38# define __ARCH_WANT_SYS_OLD_GETRLIMIT
39# define __ARCH_WANT_SYS_OLD_UNAME
40# define __ARCH_WANT_SYS_PAUSE
41# define __ARCH_WANT_SYS_RT_SIGACTION
42# define __ARCH_WANT_SYS_RT_SIGSUSPEND
43# define __ARCH_WANT_SYS_SGETMASK
44# define __ARCH_WANT_SYS_SIGNAL
45# define __ARCH_WANT_SYS_SIGPENDING
46# define __ARCH_WANT_SYS_SIGPROCMASK
47# define __ARCH_WANT_SYS_SOCKETCALL
48# define __ARCH_WANT_SYS_TIME
49# define __ARCH_WANT_SYS_UTIME
50# define __ARCH_WANT_SYS_WAITPID
51# define __ARCH_WANT_SYS_FORK
52# define __ARCH_WANT_SYS_VFORK
53# define __ARCH_WANT_SYS_CLONE
54
55/*
56 * "Conditional" syscalls
57 *
58 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
59 * but it doesn't work on all toolchains, so we just do it by hand
60 */
61# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
62
63#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
deleted file mode 100644
index 8ff8be7835a..00000000000
--- a/arch/x86/include/asm/uprobes.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _ASM_UPROBES_H
2#define _ASM_UPROBES_H
3/*
4 * User-space Probes (UProbes) for x86
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright (C) IBM Corporation, 2008-2011
21 * Authors:
22 * Srikar Dronamraju
23 * Jim Keniston
24 */
25
26#include <linux/notifier.h>
27
28typedef u8 uprobe_opcode_t;
29
30#define MAX_UINSN_BYTES 16
31#define UPROBE_XOL_SLOT_BYTES 128 /* to keep it cache aligned */
32
33#define UPROBE_SWBP_INSN 0xcc
34#define UPROBE_SWBP_INSN_SIZE 1
35
36struct arch_uprobe {
37 u16 fixups;
38 u8 insn[MAX_UINSN_BYTES];
39#ifdef CONFIG_X86_64
40 unsigned long rip_rela_target_address;
41#endif
42};
43
44struct arch_uprobe_task {
45#ifdef CONFIG_X86_64
46 unsigned long saved_scratch_register;
47#endif
48 unsigned int saved_trap_nr;
49 unsigned int saved_tf;
50};
51
52extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
53extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
54extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
55extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
56extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
57extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
58#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h
index ccab4af1646..24532c7da3d 100644
--- a/arch/x86/include/asm/user.h
+++ b/arch/x86/include/asm/user.h
@@ -2,9 +2,9 @@
2#define _ASM_X86_USER_H 2#define _ASM_X86_USER_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# include <asm/user_32.h> 5# include "user_32.h"
6#else 6#else
7# include <asm/user_64.h> 7# include "user_64.h"
8#endif 8#endif
9 9
10#include <asm/types.h> 10#include <asm/types.h>
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index b47c2a82ff1..3bb9491b765 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -15,8 +15,7 @@ extern void uv_nmi_init(void);
15extern void uv_system_init(void); 15extern void uv_system_init(void);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 17 struct mm_struct *mm,
18 unsigned long start, 18 unsigned long va,
19 unsigned end,
20 unsigned int cpu); 19 unsigned int cpu);
21 20
22#else /* X86_UV */ 21#else /* X86_UV */
@@ -27,7 +26,7 @@ static inline void uv_cpu_init(void) { }
27static inline void uv_system_init(void) { } 26static inline void uv_system_init(void) { }
28static inline const struct cpumask * 27static inline const struct cpumask *
29uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, 28uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
30 unsigned long start, unsigned long end, unsigned int cpu) 29 unsigned long va, unsigned int cpu)
31{ return cpumask; } 30{ return cpumask; }
32 31
33#endif /* X86_UV */ 32#endif /* X86_UV */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a06983cdc12..0c767a8e000 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -65,7 +65,7 @@
65 * UV2: Bit 19 selects between 65 * UV2: Bit 19 selects between
66 * (0): 10 microsecond timebase and 66 * (0): 10 microsecond timebase and
67 * (1): 80 microseconds 67 * (1): 80 microseconds
68 * we're using 560us, similar to UV1: 65 units of 10us 68 * we're using 655us, similar to UV1: 65 units of 10us
69 */ 69 */
70#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL) 70#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
71#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL) 71#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
@@ -140,9 +140,6 @@
140#define IPI_RESET_LIMIT 1 140#define IPI_RESET_LIMIT 1
141/* after this # consecutive successes, bump up the throttle if it was lowered */ 141/* after this # consecutive successes, bump up the throttle if it was lowered */
142#define COMPLETE_THRESHOLD 5 142#define COMPLETE_THRESHOLD 5
143/* after this # of giveups (fall back to kernel IPI's) disable the use of
144 the BAU for a period of time */
145#define GIVEUP_LIMIT 100
146 143
147#define UV_LB_SUBNODEID 0x10 144#define UV_LB_SUBNODEID 0x10
148 145
@@ -152,6 +149,7 @@
152/* 4 bits of software ack period */ 149/* 4 bits of software ack period */
153#define UV2_ACK_MASK 0x7UL 150#define UV2_ACK_MASK 0x7UL
154#define UV2_ACK_UNITS_SHFT 3 151#define UV2_ACK_UNITS_SHFT 3
152#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
155#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 153#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
156 154
157/* 155/*
@@ -177,7 +175,7 @@
177 microseconds */ 175 microseconds */
178#define CONGESTED_REPS 10 /* long delays averaged over 176#define CONGESTED_REPS 10 /* long delays averaged over
179 this many broadcasts */ 177 this many broadcasts */
180#define DISABLED_PERIOD 10 /* time for the bau to be 178#define CONGESTED_PERIOD 30 /* time for the bau to be
181 disabled, in seconds */ 179 disabled, in seconds */
182/* see msg_type: */ 180/* see msg_type: */
183#define MSG_NOOP 0 181#define MSG_NOOP 0
@@ -237,10 +235,10 @@ struct bau_msg_payload {
237 235
238 236
239/* 237/*
240 * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor) 238 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
241 * see table 4.2.3.0.1 in broacast_assist spec. 239 * see table 4.2.3.0.1 in broacast_assist spec.
242 */ 240 */
243struct uv1_bau_msg_header { 241struct bau_msg_header {
244 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 242 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
245 /* bits 5:0 */ 243 /* bits 5:0 */
246 unsigned int base_dest_nasid:15; /* nasid of the first bit */ 244 unsigned int base_dest_nasid:15; /* nasid of the first bit */
@@ -320,87 +318,19 @@ struct uv1_bau_msg_header {
320}; 318};
321 319
322/* 320/*
323 * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
324 * see figure 9-2 of harp_sys.pdf
325 */
326struct uv2_bau_msg_header {
327 unsigned int base_dest_nasid:15; /* nasid of the first bit */
328 /* bits 14:0 */ /* in uvhub map */
329 unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
330 /* bits 19:15 */
331 unsigned int rsvd_1:1; /* must be zero */
332 /* bit 20 */
333 /* Address bits 59:21 */
334 /* bits 25:2 of address (44:21) are payload */
335 /* these next 24 bits become bytes 12-14 of msg */
336 /* bits 28:21 land in byte 12 */
337 unsigned int replied_to:1; /* sent as 0 by the source to
338 byte 12 */
339 /* bit 21 */
340 unsigned int msg_type:3; /* software type of the
341 message */
342 /* bits 24:22 */
343 unsigned int canceled:1; /* message canceled, resource
344 is to be freed*/
345 /* bit 25 */
346 unsigned int payload_1:3; /* not currently used */
347 /* bits 28:26 */
348
349 /* bits 36:29 land in byte 13 */
350 unsigned int payload_2a:3; /* not currently used */
351 unsigned int payload_2b:5; /* not currently used */
352 /* bits 36:29 */
353
354 /* bits 44:37 land in byte 14 */
355 unsigned int payload_3:8; /* not currently used */
356 /* bits 44:37 */
357
358 unsigned int rsvd_2:7; /* reserved */
359 /* bits 51:45 */
360 unsigned int swack_flag:1; /* software acknowledge flag */
361 /* bit 52 */
362 unsigned int rsvd_3a:3; /* must be zero */
363 unsigned int rsvd_3b:8; /* must be zero */
364 unsigned int rsvd_3c:8; /* must be zero */
365 unsigned int rsvd_3d:3; /* must be zero */
366 /* bits 74:53 */
367 unsigned int fairness:3; /* usually zero */
368 /* bits 77:75 */
369
370 unsigned int sequence:16; /* message sequence number */
371 /* bits 93:78 Suppl_A */
372 unsigned int chaining:1; /* next descriptor is part of
373 this activation*/
374 /* bit 94 */
375 unsigned int multilevel:1; /* multi-level multicast
376 format */
377 /* bit 95 */
378 unsigned int rsvd_4:24; /* ordered / source node /
379 source subnode / aging
380 must be zero */
381 /* bits 119:96 */
382 unsigned int command:8; /* message type */
383 /* bits 127:120 */
384};
385
386/*
387 * The activation descriptor: 321 * The activation descriptor:
388 * The format of the message to send, plus all accompanying control 322 * The format of the message to send, plus all accompanying control
389 * Should be 64 bytes 323 * Should be 64 bytes
390 */ 324 */
391struct bau_desc { 325struct bau_desc {
392 struct pnmask distribution; 326 struct pnmask distribution;
393 /* 327 /*
394 * message template, consisting of header and payload: 328 * message template, consisting of header and payload:
395 */ 329 */
396 union bau_msg_header { 330 struct bau_msg_header header;
397 struct uv1_bau_msg_header uv1_hdr; 331 struct bau_msg_payload payload;
398 struct uv2_bau_msg_header uv2_hdr;
399 } header;
400
401 struct bau_msg_payload payload;
402}; 332};
403/* UV1: 333/*
404 * -payload-- ---------header------ 334 * -payload-- ---------header------
405 * bytes 0-11 bits 41-56 bits 58-81 335 * bytes 0-11 bits 41-56 bits 58-81
406 * A B (2) C (3) 336 * A B (2) C (3)
@@ -410,16 +340,6 @@ struct bau_desc {
410 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector) 340 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
411 * ------------payload queue----------- 341 * ------------payload queue-----------
412 */ 342 */
413/* UV2:
414 * -payload-- ---------header------
415 * bytes 0-11 bits 70-78 bits 21-44
416 * A B (2) C (3)
417 *
418 * A/B/C are moved to:
419 * A C B
420 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
421 * ------------payload queue-----------
422 */
423 343
424/* 344/*
425 * The payload queue on the destination side is an array of these. 345 * The payload queue on the destination side is an array of these.
@@ -465,6 +385,7 @@ struct bau_pq_entry {
465struct msg_desc { 385struct msg_desc {
466 struct bau_pq_entry *msg; 386 struct bau_pq_entry *msg;
467 int msg_slot; 387 int msg_slot;
388 int swack_slot;
468 struct bau_pq_entry *queue_first; 389 struct bau_pq_entry *queue_first;
469 struct bau_pq_entry *queue_last; 390 struct bau_pq_entry *queue_last;
470}; 391};
@@ -484,7 +405,6 @@ struct ptc_stats {
484 requests */ 405 requests */
485 unsigned long s_stimeout; /* source side timeouts */ 406 unsigned long s_stimeout; /* source side timeouts */
486 unsigned long s_dtimeout; /* destination side timeouts */ 407 unsigned long s_dtimeout; /* destination side timeouts */
487 unsigned long s_strongnacks; /* number of strong nack's */
488 unsigned long s_time; /* time spent in sending side */ 408 unsigned long s_time; /* time spent in sending side */
489 unsigned long s_retriesok; /* successful retries */ 409 unsigned long s_retriesok; /* successful retries */
490 unsigned long s_ntargcpu; /* total number of cpu's 410 unsigned long s_ntargcpu; /* total number of cpu's
@@ -519,15 +439,6 @@ struct ptc_stats {
519 unsigned long s_retry_messages; /* retry broadcasts */ 439 unsigned long s_retry_messages; /* retry broadcasts */
520 unsigned long s_bau_reenabled; /* for bau enable/disable */ 440 unsigned long s_bau_reenabled; /* for bau enable/disable */
521 unsigned long s_bau_disabled; /* for bau enable/disable */ 441 unsigned long s_bau_disabled; /* for bau enable/disable */
522 unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
523 unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
524 unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
525 unsigned long s_overipilimit; /* over the ipi reset limit */
526 unsigned long s_giveuplimit; /* disables, over giveup limit*/
527 unsigned long s_enters; /* entries to the driver */
528 unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
529 unsigned long s_plugged; /* plugged by h/w bug*/
530 unsigned long s_congested; /* giveup on long wait */
531 /* destination statistics */ 442 /* destination statistics */
532 unsigned long d_alltlb; /* times all tlb's on this 443 unsigned long d_alltlb; /* times all tlb's on this
533 cpu were flushed */ 444 cpu were flushed */
@@ -594,26 +505,21 @@ struct bau_control {
594 int timeout_tries; 505 int timeout_tries;
595 int ipi_attempts; 506 int ipi_attempts;
596 int conseccompletes; 507 int conseccompletes;
597 short nobau; 508 int baudisabled;
598 short baudisabled; 509 int set_bau_off;
599 short cpu; 510 short cpu;
600 short osnode; 511 short osnode;
601 short uvhub_cpu; 512 short uvhub_cpu;
602 short uvhub; 513 short uvhub;
603 short uvhub_version;
604 short cpus_in_socket; 514 short cpus_in_socket;
605 short cpus_in_uvhub; 515 short cpus_in_uvhub;
606 short partition_base_pnode; 516 short partition_base_pnode;
607 short busy; /* all were busy (war) */
608 unsigned short message_number; 517 unsigned short message_number;
609 unsigned short uvhub_quiesce; 518 unsigned short uvhub_quiesce;
610 short socket_acknowledge_count[DEST_Q_SIZE]; 519 short socket_acknowledge_count[DEST_Q_SIZE];
611 cycles_t send_message; 520 cycles_t send_message;
612 cycles_t period_end;
613 cycles_t period_time;
614 spinlock_t uvhub_lock; 521 spinlock_t uvhub_lock;
615 spinlock_t queue_lock; 522 spinlock_t queue_lock;
616 spinlock_t disable_lock;
617 /* tunables */ 523 /* tunables */
618 int max_concurr; 524 int max_concurr;
619 int max_concurr_const; 525 int max_concurr_const;
@@ -624,9 +530,8 @@ struct bau_control {
624 int complete_threshold; 530 int complete_threshold;
625 int cong_response_us; 531 int cong_response_us;
626 int cong_reps; 532 int cong_reps;
627 cycles_t disabled_period; 533 int cong_period;
628 int period_giveups; 534 cycles_t period_time;
629 int giveup_limit;
630 long period_requests; 535 long period_requests;
631 struct hub_and_pnode *thp; 536 struct hub_and_pnode *thp;
632}; 537};
@@ -686,11 +591,6 @@ static inline void write_mmr_sw_ack(unsigned long mr)
686 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); 591 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
687} 592}
688 593
689static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
690{
691 write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
692}
693
694static inline unsigned long read_mmr_sw_ack(void) 594static inline unsigned long read_mmr_sw_ack(void)
695{ 595{
696 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 596 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
@@ -757,7 +657,11 @@ static inline int atomic_read_short(const struct atomic_short *v)
757 */ 657 */
758static inline int atom_asr(short i, struct atomic_short *v) 658static inline int atom_asr(short i, struct atomic_short *v)
759{ 659{
760 return i + xadd(&v->counter, i); 660 short __i = i;
661 asm volatile(LOCK_PREFIX "xaddw %0, %1"
662 : "+r" (i), "+m" (v->counter)
663 : : "memory");
664 return i + __i;
761} 665}
762 666
763/* 667/*
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb..54a13aaebc4 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
318/* UV global physical address --> socket phys RAM */ 318/* UV global physical address --> socket phys RAM */
319static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa) 319static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
320{ 320{
321 unsigned long paddr; 321 unsigned long paddr = gpa & uv_hub_info->gpa_mask;
322 unsigned long remap_base = uv_hub_info->lowmem_remap_base; 322 unsigned long remap_base = uv_hub_info->lowmem_remap_base;
323 unsigned long remap_top = uv_hub_info->lowmem_remap_top; 323 unsigned long remap_top = uv_hub_info->lowmem_remap_top;
324 324
325 gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) | 325 gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
326 ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val); 326 ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
327 paddr = gpa & uv_hub_info->gpa_mask; 327 gpa = gpa & uv_hub_info->gpa_mask;
328 if (paddr >= remap_base && paddr < remap_base + remap_top) 328 if (paddr >= remap_base && paddr < remap_base + remap_top)
329 paddr -= remap_base; 329 paddr -= remap_base;
330 return paddr; 330 return paddr;
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index cf1d73643f6..10474fb1185 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -57,7 +57,6 @@
57 57
58#define UV1_HUB_PART_NUMBER 0x88a5 58#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8 59#define UV2_HUB_PART_NUMBER 0x8eb8
60#define UV2_HUB_PART_NUMBER_X 0x1111
61 60
62/* Compat: if this #define is present, UV headers support UV2 */ 61/* Compat: if this #define is present, UV headers support UV2 */
63#define UV2_HUB_IS_SUPPORTED 1 62#define UV2_HUB_IS_SUPPORTED 1
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index fddb53d6391..bb0522850b7 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -11,8 +11,7 @@ extern const char VDSO32_PRELINK[];
11#define VDSO32_SYMBOL(base, name) \ 11#define VDSO32_SYMBOL(base, name) \
12({ \ 12({ \
13 extern const char VDSO32_##name[]; \ 13 extern const char VDSO32_##name[]; \
14 (void __user *)(VDSO32_##name - VDSO32_PRELINK + \ 14 (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
15 (unsigned long)(base)); \
16}) 15})
17#endif 16#endif
18 17
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index 44282fbf7bf..c4b9dc2f67c 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -17,10 +17,4 @@
17#define vga_readb(x) (*(x)) 17#define vga_readb(x) (*(x))
18#define vga_writeb(x, y) (*(y) = (x)) 18#define vga_writeb(x, y) (*(y) = (x))
19 19
20#ifdef CONFIG_FB_EFI
21#define __ARCH_HAS_VGA_DEFAULT_DEVICE
22extern struct pci_dev *vga_default_device(void);
23extern void vga_set_default_device(struct pci_dev *pdev);
24#endif
25
26#endif /* _ASM_X86_VGA_H */ 20#endif /* _ASM_X86_VGA_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 46e24d36b7d..815285bcace 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,8 +5,13 @@
5#include <linux/clocksource.h> 5#include <linux/clocksource.h>
6 6
7struct vsyscall_gtod_data { 7struct vsyscall_gtod_data {
8 seqcount_t seq; 8 seqlock_t lock;
9 9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 struct timezone sys_tz;
10 struct { /* extract of a clocksource struct */ 15 struct { /* extract of a clocksource struct */
11 int vclock_mode; 16 int vclock_mode;
12 cycle_t cycle_last; 17 cycle_t cycle_last;
@@ -14,16 +19,8 @@ struct vsyscall_gtod_data {
14 u32 mult; 19 u32 mult;
15 u32 shift; 20 u32 shift;
16 } clock; 21 } clock;
17 22 struct timespec wall_to_monotonic;
18 /* open coded 'struct timespec' */
19 time_t wall_time_sec;
20 u64 wall_time_snsec;
21 u64 monotonic_time_snsec;
22 time_t monotonic_time_sec;
23
24 struct timezone sys_tz;
25 struct timespec wall_time_coarse; 23 struct timespec wall_time_coarse;
26 struct timespec monotonic_time_coarse;
27}; 24};
28extern struct vsyscall_gtod_data vsyscall_gtod_data; 25extern struct vsyscall_gtod_data vsyscall_gtod_data;
29 26
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 5da71c27cc5..e0f9aa16358 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -16,6 +16,7 @@
16#define _ASM_X86_VIRTEX_H 16#define _ASM_X86_VIRTEX_H
17 17
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/system.h>
19 20
20#include <asm/vmx.h> 21#include <asm/vmx.h>
21#include <asm/svm.h> 22#include <asm/svm.h>
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1d8de3f3fec..f9303602fbc 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,9 +1,133 @@
1#ifndef _ASM_X86_VM86_H 1#ifndef _ASM_X86_VM86_H
2#define _ASM_X86_VM86_H 2#define _ASM_X86_VM86_H
3 3
4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
6 * the Pentium uses them. Linux will return from vm86 mode when both
7 * VIF and VIP is set.
8 *
9 * On a Pentium, we could probably optimize the virtual flags directly
10 * in the eflags register instead of doing it "by hand" in vflags...
11 *
12 * Linus
13 */
14
15#include <asm/processor-flags.h>
16
17#define BIOSSEG 0x0f000
18
19#define CPU_086 0
20#define CPU_186 1
21#define CPU_286 2
22#define CPU_386 3
23#define CPU_486 4
24#define CPU_586 5
25
26/*
27 * Return values for the 'vm86()' system call
28 */
29#define VM86_TYPE(retval) ((retval) & 0xff)
30#define VM86_ARG(retval) ((retval) >> 8)
31
32#define VM86_SIGNAL 0 /* return due to signal */
33#define VM86_UNKNOWN 1 /* unhandled GP fault
34 - IO-instruction or similar */
35#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
36#define VM86_STI 3 /* sti/popf/iret instruction enabled
37 virtual interrupts */
38
39/*
40 * Additional return values when invoking new vm86()
41 */
42#define VM86_PICRETURN 4 /* return due to pending PIC request */
43#define VM86_TRAP 6 /* return due to DOS-debugger request */
44
45/*
46 * function codes when invoking new vm86()
47 */
48#define VM86_PLUS_INSTALL_CHECK 0
49#define VM86_ENTER 1
50#define VM86_ENTER_NO_BYPASS 2
51#define VM86_REQUEST_IRQ 3
52#define VM86_FREE_IRQ 4
53#define VM86_GET_IRQ_BITS 5
54#define VM86_GET_AND_RESET_IRQ 6
55
56/*
57 * This is the stack-layout seen by the user space program when we have
58 * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
59 * is 'kernel_vm86_regs' (see below).
60 */
61
62struct vm86_regs {
63/*
64 * normal regs, with special meaning for the segment descriptors..
65 */
66 long ebx;
67 long ecx;
68 long edx;
69 long esi;
70 long edi;
71 long ebp;
72 long eax;
73 long __null_ds;
74 long __null_es;
75 long __null_fs;
76 long __null_gs;
77 long orig_eax;
78 long eip;
79 unsigned short cs, __csh;
80 long eflags;
81 long esp;
82 unsigned short ss, __ssh;
83/*
84 * these are specific to v86 mode:
85 */
86 unsigned short es, __esh;
87 unsigned short ds, __dsh;
88 unsigned short fs, __fsh;
89 unsigned short gs, __gsh;
90};
91
92struct revectored_struct {
93 unsigned long __map[8]; /* 256 bits */
94};
95
96struct vm86_struct {
97 struct vm86_regs regs;
98 unsigned long flags;
99 unsigned long screen_bitmap;
100 unsigned long cpu_type;
101 struct revectored_struct int_revectored;
102 struct revectored_struct int21_revectored;
103};
104
105/*
106 * flags masks
107 */
108#define VM86_SCREEN_BITMAP 0x0001
109
110struct vm86plus_info_struct {
111 unsigned long force_return_for_pic:1;
112 unsigned long vm86dbg_active:1; /* for debugger */
113 unsigned long vm86dbg_TFpendig:1; /* for debugger */
114 unsigned long unused:28;
115 unsigned long is_vm86pus:1; /* for vm86 internal use */
116 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
117};
118struct vm86plus_struct {
119 struct vm86_regs regs;
120 unsigned long flags;
121 unsigned long screen_bitmap;
122 unsigned long cpu_type;
123 struct revectored_struct int_revectored;
124 struct revectored_struct int21_revectored;
125 struct vm86plus_info_struct vm86plus;
126};
127
128#ifdef __KERNEL__
4 129
5#include <asm/ptrace.h> 130#include <asm/ptrace.h>
6#include <uapi/asm/vm86.h>
7 131
8/* 132/*
9 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 133 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
@@ -79,4 +203,6 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
79 203
80#endif /* CONFIG_VM86 */ 204#endif /* CONFIG_VM86 */
81 205
206#endif /* __KERNEL__ */
207
82#endif /* _ASM_X86_VM86_H */ 208#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 235b49fa554..2caf290e989 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -1,3 +1,6 @@
1#ifndef VMX_H
2#define VMX_H
3
1/* 4/*
2 * vmx.h: VMX Architecture related definitions 5 * vmx.h: VMX Architecture related definitions
3 * Copyright (c) 2004, Intel Corporation. 6 * Copyright (c) 2004, Intel Corporation.
@@ -21,12 +24,8 @@
21 * Yaniv Kamay <yaniv@qumranet.com> 24 * Yaniv Kamay <yaniv@qumranet.com>
22 * 25 *
23 */ 26 */
24#ifndef VMX_H
25#define VMX_H
26
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include <uapi/asm/vmx.h>
30 29
31/* 30/*
32 * Definitions of Primary Processor-Based VM-Execution Controls. 31 * Definitions of Primary Processor-Based VM-Execution Controls.
@@ -61,7 +60,6 @@
61#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 60#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
62#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 61#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
63#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 62#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
64#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
65 63
66 64
67#define PIN_BASED_EXT_INTR_MASK 0x00000001 65#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -242,6 +240,48 @@ enum vmcs_field {
242 HOST_RIP = 0x00006c16, 240 HOST_RIP = 0x00006c16,
243}; 241};
244 242
243#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
244
245#define EXIT_REASON_EXCEPTION_NMI 0
246#define EXIT_REASON_EXTERNAL_INTERRUPT 1
247#define EXIT_REASON_TRIPLE_FAULT 2
248
249#define EXIT_REASON_PENDING_INTERRUPT 7
250#define EXIT_REASON_NMI_WINDOW 8
251#define EXIT_REASON_TASK_SWITCH 9
252#define EXIT_REASON_CPUID 10
253#define EXIT_REASON_HLT 12
254#define EXIT_REASON_INVD 13
255#define EXIT_REASON_INVLPG 14
256#define EXIT_REASON_RDPMC 15
257#define EXIT_REASON_RDTSC 16
258#define EXIT_REASON_VMCALL 18
259#define EXIT_REASON_VMCLEAR 19
260#define EXIT_REASON_VMLAUNCH 20
261#define EXIT_REASON_VMPTRLD 21
262#define EXIT_REASON_VMPTRST 22
263#define EXIT_REASON_VMREAD 23
264#define EXIT_REASON_VMRESUME 24
265#define EXIT_REASON_VMWRITE 25
266#define EXIT_REASON_VMOFF 26
267#define EXIT_REASON_VMON 27
268#define EXIT_REASON_CR_ACCESS 28
269#define EXIT_REASON_DR_ACCESS 29
270#define EXIT_REASON_IO_INSTRUCTION 30
271#define EXIT_REASON_MSR_READ 31
272#define EXIT_REASON_MSR_WRITE 32
273#define EXIT_REASON_INVALID_STATE 33
274#define EXIT_REASON_MWAIT_INSTRUCTION 36
275#define EXIT_REASON_MONITOR_INSTRUCTION 39
276#define EXIT_REASON_PAUSE_INSTRUCTION 40
277#define EXIT_REASON_MCE_DURING_VMENTRY 41
278#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
279#define EXIT_REASON_APIC_ACCESS 44
280#define EXIT_REASON_EPT_VIOLATION 48
281#define EXIT_REASON_EPT_MISCONFIG 49
282#define EXIT_REASON_WBINVD 54
283#define EXIT_REASON_XSETBV 55
284
245/* 285/*
246 * Interruption-information format 286 * Interruption-information format
247 */ 287 */
@@ -310,18 +350,6 @@ enum vmcs_field {
310#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */ 350#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
311 351
312 352
313/*
314 * Exit Qualifications for APIC-Access
315 */
316#define APIC_ACCESS_OFFSET 0xfff /* 11:0, offset within the APIC page */
317#define APIC_ACCESS_TYPE 0xf000 /* 15:12, access type */
318#define TYPE_LINEAR_APIC_INST_READ (0 << 12)
319#define TYPE_LINEAR_APIC_INST_WRITE (1 << 12)
320#define TYPE_LINEAR_APIC_INST_FETCH (2 << 12)
321#define TYPE_LINEAR_APIC_EVENT (3 << 12)
322#define TYPE_PHYSICAL_APIC_EVENT (10 << 12)
323#define TYPE_PHYSICAL_APIC_INST (15 << 12)
324
325/* segment AR */ 353/* segment AR */
326#define SEGMENT_AR_L_MASK (1 << 13) 354#define SEGMENT_AR_L_MASK (1 << 13)
327 355
@@ -364,7 +392,7 @@ enum vmcs_field {
364#define VMX_EPTP_WB_BIT (1ull << 14) 392#define VMX_EPTP_WB_BIT (1ull << 14)
365#define VMX_EPT_2MB_PAGE_BIT (1ull << 16) 393#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
366#define VMX_EPT_1GB_PAGE_BIT (1ull << 17) 394#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
367#define VMX_EPT_AD_BIT (1ull << 21) 395#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
368#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) 396#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
369#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) 397#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
370 398
@@ -375,14 +403,11 @@ enum vmcs_field {
375#define VMX_EPT_MAX_GAW 0x4 403#define VMX_EPT_MAX_GAW 0x4
376#define VMX_EPT_MT_EPTE_SHIFT 3 404#define VMX_EPT_MT_EPTE_SHIFT 3
377#define VMX_EPT_GAW_EPTP_SHIFT 3 405#define VMX_EPT_GAW_EPTP_SHIFT 3
378#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
379#define VMX_EPT_DEFAULT_MT 0x6ull 406#define VMX_EPT_DEFAULT_MT 0x6ull
380#define VMX_EPT_READABLE_MASK 0x1ull 407#define VMX_EPT_READABLE_MASK 0x1ull
381#define VMX_EPT_WRITABLE_MASK 0x2ull 408#define VMX_EPT_WRITABLE_MASK 0x2ull
382#define VMX_EPT_EXECUTABLE_MASK 0x4ull 409#define VMX_EPT_EXECUTABLE_MASK 0x4ull
383#define VMX_EPT_IPAT_BIT (1ull << 6) 410#define VMX_EPT_IPAT_BIT (1ull << 6)
384#define VMX_EPT_ACCESS_BIT (1ull << 8)
385#define VMX_EPT_DIRTY_BIT (1ull << 9)
386 411
387#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 412#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
388 413
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720af..eaea1d31f75 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -1,8 +1,20 @@
1#ifndef _ASM_X86_VSYSCALL_H 1#ifndef _ASM_X86_VSYSCALL_H
2#define _ASM_X86_VSYSCALL_H 2#define _ASM_X86_VSYSCALL_H
3 3
4enum vsyscall_num {
5 __NR_vgettimeofday,
6 __NR_vtime,
7 __NR_vgetcpu,
8};
9
10#define VSYSCALL_START (-10UL << 20)
11#define VSYSCALL_SIZE 1024
12#define VSYSCALL_END (-2UL << 20)
13#define VSYSCALL_MAPPED_PAGES 1
14#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
15
16#ifdef __KERNEL__
4#include <linux/seqlock.h> 17#include <linux/seqlock.h>
5#include <uapi/asm/vsyscall.h>
6 18
7#define VGETCPU_RDTSCP 1 19#define VGETCPU_RDTSCP 1
8#define VGETCPU_LSL 2 20#define VGETCPU_LSL 2
@@ -21,24 +33,6 @@ extern void map_vsyscall(void);
21 */ 33 */
22extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); 34extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
23 35
24#ifdef CONFIG_X86_64 36#endif /* __KERNEL__ */
25
26#define VGETCPU_CPU_MASK 0xfff
27
28static inline unsigned int __getcpu(void)
29{
30 unsigned int p;
31
32 if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
33 /* Load per CPU data from RDTSCP */
34 native_read_tscp(&p);
35 } else {
36 /* Load per CPU data from GDT */
37 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
38 }
39
40 return p;
41}
42#endif /* CONFIG_X86_64 */
43 37
44#endif /* _ASM_X86_VSYSCALL_H */ 38#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
deleted file mode 100644
index 5b238981542..00000000000
--- a/arch/x86/include/asm/word-at-a-time.h
+++ /dev/null
@@ -1,105 +0,0 @@
1#ifndef _ASM_WORD_AT_A_TIME_H
2#define _ASM_WORD_AT_A_TIME_H
3
4#include <linux/kernel.h>
5
6/*
7 * This is largely generic for little-endian machines, but the
8 * optimal byte mask counting is probably going to be something
9 * that is architecture-specific. If you have a reliably fast
10 * bit count instruction, that might be better than the multiply
11 * and shift, for example.
12 */
13struct word_at_a_time {
14 const unsigned long one_bits, high_bits;
15};
16
17#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
18
19#ifdef CONFIG_64BIT
20
21/*
22 * Jan Achrenius on G+: microoptimized version of
23 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
24 * that works for the bytemasks without having to
25 * mask them first.
26 */
27static inline long count_masked_bytes(unsigned long mask)
28{
29 return mask*0x0001020304050608ul >> 56;
30}
31
32#else /* 32-bit case */
33
34/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
35static inline long count_masked_bytes(long mask)
36{
37 /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
38 long a = (0x0ff0001+mask) >> 23;
39 /* Fix the 1 for 00 case */
40 return a & mask;
41}
42
43#endif
44
45/* Return nonzero if it has a zero */
46static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
47{
48 unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
49 *bits = mask;
50 return mask;
51}
52
53static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
54{
55 return bits;
56}
57
58static inline unsigned long create_zero_mask(unsigned long bits)
59{
60 bits = (bits - 1) & ~bits;
61 return bits >> 7;
62}
63
64/* The mask we created is directly usable as a bytemask */
65#define zero_bytemask(mask) (mask)
66
67static inline unsigned long find_zero(unsigned long mask)
68{
69 return count_masked_bytes(mask);
70}
71
72/*
73 * Load an unaligned word from kernel space.
74 *
75 * In the (very unlikely) case of the word being a page-crosser
76 * and the next page not being mapped, take the exception and
77 * return zeroes in the non-existing part.
78 */
79static inline unsigned long load_unaligned_zeropad(const void *addr)
80{
81 unsigned long ret, dummy;
82
83 asm(
84 "1:\tmov %2,%0\n"
85 "2:\n"
86 ".section .fixup,\"ax\"\n"
87 "3:\t"
88 "lea %2,%1\n\t"
89 "and %3,%1\n\t"
90 "mov (%1),%0\n\t"
91 "leal %2,%%ecx\n\t"
92 "andl %4,%%ecx\n\t"
93 "shll $3,%%ecx\n\t"
94 "shr %%cl,%0\n\t"
95 "jmp 2b\n"
96 ".previous\n"
97 _ASM_EXTABLE(1b, 3b)
98 :"=&r" (ret),"=&c" (dummy)
99 :"m" (*(unsigned long *)addr),
100 "i" (-sizeof(unsigned long)),
101 "i" (sizeof(unsigned long)-1));
102 return ret;
103}
104
105#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index f90f0a587c6..6bf5b8e478c 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -9,9 +9,13 @@
9#include <asm/ipi.h> 9#include <asm/ipi.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11 11
12static int x2apic_apic_id_valid(int apicid) 12/*
13 * Need to use more than cpu 0, because we need more vectors
14 * when MSI-X are used.
15 */
16static const struct cpumask *x2apic_target_cpus(void)
13{ 17{
14 return 1; 18 return cpu_online_mask;
15} 19}
16 20
17static int x2apic_apic_id_registered(void) 21static int x2apic_apic_id_registered(void)
@@ -19,6 +23,15 @@ static int x2apic_apic_id_registered(void)
19 return 1; 23 return 1;
20} 24}
21 25
26/*
27 * For now each logical cpu is in its own vector allocation domain.
28 */
29static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
30{
31 cpumask_clear(retmask);
32 cpumask_set_cpu(cpu, retmask);
33}
34
22static void 35static void
23__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) 36__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
24{ 37{
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 57693498519..d3d859035af 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -7,7 +7,6 @@
7struct mpc_bus; 7struct mpc_bus;
8struct mpc_cpu; 8struct mpc_cpu;
9struct mpc_table; 9struct mpc_table;
10struct cpuinfo_x86;
11 10
12/** 11/**
13 * struct x86_init_mpparse - platform specific mpparse ops 12 * struct x86_init_mpparse - platform specific mpparse ops
@@ -81,13 +80,12 @@ struct x86_init_mapping {
81 80
82/** 81/**
83 * struct x86_init_paging - platform specific paging functions 82 * struct x86_init_paging - platform specific paging functions
84 * @pagetable_init: platform specific paging initialization call to setup 83 * @pagetable_setup_start: platform specific pre paging_init() call
85 * the kernel pagetables and prepare accessors functions. 84 * @pagetable_setup_done: platform specific post paging_init() call
86 * Callback must call paging_init(). Called once after the
87 * direct mapping for phys memory is available.
88 */ 85 */
89struct x86_init_paging { 86struct x86_init_paging {
90 void (*pagetable_init)(void); 87 void (*pagetable_setup_start)(pgd_t *base);
88 void (*pagetable_setup_done)(pgd_t *base);
91}; 89};
92 90
93/** 91/**
@@ -146,12 +144,9 @@ struct x86_init_ops {
146/** 144/**
147 * struct x86_cpuinit_ops - platform specific cpu hotplug setups 145 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
148 * @setup_percpu_clockev: set up the per cpu clock event device 146 * @setup_percpu_clockev: set up the per cpu clock event device
149 * @early_percpu_clock_init: early init of the per cpu clock event device
150 */ 147 */
151struct x86_cpuinit_ops { 148struct x86_cpuinit_ops {
152 void (*setup_percpu_clockev)(void); 149 void (*setup_percpu_clockev)(void);
153 void (*early_percpu_clock_init)(void);
154 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
155}; 150};
156 151
157/** 152/**
@@ -162,9 +157,6 @@ struct x86_cpuinit_ops {
162 * @is_untracked_pat_range exclude from PAT logic 157 * @is_untracked_pat_range exclude from PAT logic
163 * @nmi_init enable NMI on cpus 158 * @nmi_init enable NMI on cpus
164 * @i8042_detect pre-detect if i8042 controller exists 159 * @i8042_detect pre-detect if i8042 controller exists
165 * @save_sched_clock_state: save state for sched_clock() on suspend
166 * @restore_sched_clock_state: restore state for sched_clock() on resume
167 * @apic_post_init: adjust apic if neeeded
168 */ 160 */
169struct x86_platform_ops { 161struct x86_platform_ops {
170 unsigned long (*calibrate_tsc)(void); 162 unsigned long (*calibrate_tsc)(void);
@@ -173,11 +165,7 @@ struct x86_platform_ops {
173 void (*iommu_shutdown)(void); 165 void (*iommu_shutdown)(void);
174 bool (*is_untracked_pat_range)(u64 start, u64 end); 166 bool (*is_untracked_pat_range)(u64 start, u64 end);
175 void (*nmi_init)(void); 167 void (*nmi_init)(void);
176 unsigned char (*get_nmi_reason)(void);
177 int (*i8042_detect)(void); 168 int (*i8042_detect)(void);
178 void (*save_sched_clock_state)(void);
179 void (*restore_sched_clock_state)(void);
180 void (*apic_post_init)(void);
181}; 169};
182 170
183struct pci_dev; 171struct pci_dev;
@@ -186,21 +174,13 @@ struct x86_msi_ops {
186 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 174 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
187 void (*teardown_msi_irq)(unsigned int irq); 175 void (*teardown_msi_irq)(unsigned int irq);
188 void (*teardown_msi_irqs)(struct pci_dev *dev); 176 void (*teardown_msi_irqs)(struct pci_dev *dev);
189 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
190};
191
192struct x86_io_apic_ops {
193 void (*init) (void);
194 unsigned int (*read) (unsigned int apic, unsigned int reg);
195 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
196 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
197}; 177};
198 178
199extern struct x86_init_ops x86_init; 179extern struct x86_init_ops x86_init;
200extern struct x86_cpuinit_ops x86_cpuinit; 180extern struct x86_cpuinit_ops x86_cpuinit;
201extern struct x86_platform_ops x86_platform; 181extern struct x86_platform_ops x86_platform;
202extern struct x86_msi_ops x86_msi; 182extern struct x86_msi_ops x86_msi;
203extern struct x86_io_apic_ops x86_io_apic_ops; 183
204extern void x86_init_noop(void); 184extern void x86_init_noop(void);
205extern void x86_init_uint_noop(unsigned int unused); 185extern void x86_init_uint_noop(unsigned int unused);
206 186
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index cc146d51449..1df35417c41 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,7 +6,6 @@ enum ipi_vector {
6 XEN_CALL_FUNCTION_VECTOR, 6 XEN_CALL_FUNCTION_VECTOR,
7 XEN_CALL_FUNCTION_SINGLE_VECTOR, 7 XEN_CALL_FUNCTION_SINGLE_VECTOR,
8 XEN_SPIN_UNLOCK_VECTOR, 8 XEN_SPIN_UNLOCK_VECTOR,
9 XEN_IRQ_WORK_VECTOR,
10 9
11 XEN_NR_IPIS, 10 XEN_NR_IPIS,
12}; 11};
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc..417777de5a4 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -47,8 +47,6 @@
47#include <xen/interface/xen.h> 47#include <xen/interface/xen.h>
48#include <xen/interface/sched.h> 48#include <xen/interface/sched.h>
49#include <xen/interface/physdev.h> 49#include <xen/interface/physdev.h>
50#include <xen/interface/platform.h>
51#include <xen/interface/xen-mca.h>
52 50
53/* 51/*
54 * The hypercall asms have to meet several constraints: 52 * The hypercall asms have to meet several constraints:
@@ -303,20 +301,6 @@ HYPERVISOR_set_timer_op(u64 timeout)
303} 301}
304 302
305static inline int 303static inline int
306HYPERVISOR_mca(struct xen_mc *mc_op)
307{
308 mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
309 return _hypercall1(int, mca, mc_op);
310}
311
312static inline int
313HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
314{
315 platform_op->interface_version = XENPF_INTERFACE_VERSION;
316 return _hypercall1(int, dom0_op, platform_op);
317}
318
319static inline int
320HYPERVISOR_set_debugreg(int reg, unsigned long value) 304HYPERVISOR_set_debugreg(int reg, unsigned long value)
321{ 305{
322 return _hypercall2(int, set_debugreg, reg, value); 306 return _hypercall2(int, set_debugreg, reg, value);
@@ -359,14 +343,18 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
359 return _hypercall4(int, update_va_mapping, va, 343 return _hypercall4(int, update_va_mapping, va,
360 new_val.pte, new_val.pte >> 32, flags); 344 new_val.pte, new_val.pte >> 32, flags);
361} 345}
362extern int __must_check xen_event_channel_op_compat(int, void *);
363 346
364static inline int 347static inline int
365HYPERVISOR_event_channel_op(int cmd, void *arg) 348HYPERVISOR_event_channel_op(int cmd, void *arg)
366{ 349{
367 int rc = _hypercall2(int, event_channel_op, cmd, arg); 350 int rc = _hypercall2(int, event_channel_op, cmd, arg);
368 if (unlikely(rc == -ENOSYS)) 351 if (unlikely(rc == -ENOSYS)) {
369 rc = xen_event_channel_op_compat(cmd, arg); 352 struct evtchn_op op;
353 op.cmd = cmd;
354 memcpy(&op.u, arg, sizeof(op.u));
355 rc = _hypercall1(int, event_channel_op_compat, &op);
356 memcpy(arg, &op.u, sizeof(op.u));
357 }
370 return rc; 358 return rc;
371} 359}
372 360
@@ -382,14 +370,17 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
382 return _hypercall3(int, console_io, cmd, count, str); 370 return _hypercall3(int, console_io, cmd, count, str);
383} 371}
384 372
385extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
386
387static inline int 373static inline int
388HYPERVISOR_physdev_op(int cmd, void *arg) 374HYPERVISOR_physdev_op(int cmd, void *arg)
389{ 375{
390 int rc = _hypercall2(int, physdev_op, cmd, arg); 376 int rc = _hypercall2(int, physdev_op, cmd, arg);
391 if (unlikely(rc == -ENOSYS)) 377 if (unlikely(rc == -ENOSYS)) {
392 rc = HYPERVISOR_physdev_op_compat(cmd, arg); 378 struct physdev_op op;
379 op.cmd = cmd;
380 memcpy(&op.u, arg, sizeof(op.u));
381 rc = _hypercall1(int, physdev_op_compat, &op);
382 memcpy(arg, &op.u, sizeof(op.u));
383 }
393 return rc; 384 return rc;
394} 385}
395 386
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 125f344f06a..66d0fff1ee8 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -33,6 +33,7 @@
33#ifndef _ASM_X86_XEN_HYPERVISOR_H 33#ifndef _ASM_X86_XEN_HYPERVISOR_H
34#define _ASM_X86_XEN_HYPERVISOR_H 34#define _ASM_X86_XEN_HYPERVISOR_H
35 35
36/* arch/i386/kernel/setup.c */
36extern struct shared_info *HYPERVISOR_shared_info; 37extern struct shared_info *HYPERVISOR_shared_info;
37extern struct start_info *xen_start_info; 38extern struct start_info *xen_start_info;
38 39
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index fd9cb7695b5..5d4922ad4b9 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -47,23 +47,14 @@
47#endif 47#endif
48 48
49#ifndef __ASSEMBLY__ 49#ifndef __ASSEMBLY__
50/* Explicitly size integers that represent pfns in the public interface
51 * with Xen so that on ARM we can have one ABI that works for 32 and 64
52 * bit guests. */
53typedef unsigned long xen_pfn_t;
54#define PRI_xen_pfn "lx"
55typedef unsigned long xen_ulong_t;
56#define PRI_xen_ulong "lx"
57/* Guest handles for primitive C types. */ 50/* Guest handles for primitive C types. */
58__DEFINE_GUEST_HANDLE(uchar, unsigned char); 51__DEFINE_GUEST_HANDLE(uchar, unsigned char);
59__DEFINE_GUEST_HANDLE(uint, unsigned int); 52__DEFINE_GUEST_HANDLE(uint, unsigned int);
53__DEFINE_GUEST_HANDLE(ulong, unsigned long);
60DEFINE_GUEST_HANDLE(char); 54DEFINE_GUEST_HANDLE(char);
61DEFINE_GUEST_HANDLE(int); 55DEFINE_GUEST_HANDLE(int);
56DEFINE_GUEST_HANDLE(long);
62DEFINE_GUEST_HANDLE(void); 57DEFINE_GUEST_HANDLE(void);
63DEFINE_GUEST_HANDLE(uint64_t);
64DEFINE_GUEST_HANDLE(uint32_t);
65DEFINE_GUEST_HANDLE(xen_pfn_t);
66DEFINE_GUEST_HANDLE(xen_ulong_t);
67#endif 58#endif
68 59
69#ifndef HYPERVISOR_VIRT_START 60#ifndef HYPERVISOR_VIRT_START
@@ -123,13 +114,11 @@ struct arch_shared_info {
123#endif /* !__ASSEMBLY__ */ 114#endif /* !__ASSEMBLY__ */
124 115
125#ifdef CONFIG_X86_32 116#ifdef CONFIG_X86_32
126#include <asm/xen/interface_32.h> 117#include "interface_32.h"
127#else 118#else
128#include <asm/xen/interface_64.h> 119#include "interface_64.h"
129#endif 120#endif
130 121
131#include <asm/pvclock-abi.h>
132
133#ifndef __ASSEMBLY__ 122#ifndef __ASSEMBLY__
134/* 123/*
135 * The following is all CPU context. Note that the fpu_ctxt block is filled 124 * The following is all CPU context. Note that the fpu_ctxt block is filled
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 472b9b78301..7ff4669580c 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -12,7 +12,6 @@
12#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13 13
14#include <xen/interface/xen.h> 14#include <xen/interface/xen.h>
15#include <xen/grant_table.h>
16#include <xen/features.h> 15#include <xen/features.h>
17 16
18/* Xen machine address */ 17/* Xen machine address */
@@ -44,18 +43,19 @@ extern unsigned long machine_to_phys_nr;
44 43
45extern unsigned long get_phys_to_machine(unsigned long pfn); 44extern unsigned long get_phys_to_machine(unsigned long pfn);
46extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 45extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
47extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
48extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 46extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
49extern unsigned long set_phys_range_identity(unsigned long pfn_s, 47extern unsigned long set_phys_range_identity(unsigned long pfn_s,
50 unsigned long pfn_e); 48 unsigned long pfn_e);
51 49
52extern int m2p_add_override(unsigned long mfn, struct page *page, 50extern int m2p_add_override(unsigned long mfn, struct page *page,
53 struct gnttab_map_grant_ref *kmap_op); 51 bool clear_pte);
54extern int m2p_remove_override(struct page *page, 52extern int m2p_remove_override(struct page *page, bool clear_pte);
55 struct gnttab_map_grant_ref *kmap_op);
56extern struct page *m2p_find_override(unsigned long mfn); 53extern struct page *m2p_find_override(unsigned long mfn);
57extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); 54extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
58 55
56#ifdef CONFIG_XEN_DEBUG_FS
57extern int p2m_dump_show(struct seq_file *m, void *v);
58#endif
59static inline unsigned long pfn_to_mfn(unsigned long pfn) 59static inline unsigned long pfn_to_mfn(unsigned long pfn)
60{ 60{
61 unsigned long mfn; 61 unsigned long mfn;
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index ee52fcac6f7..1be1ab7d6a4 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -5,12 +5,10 @@
5extern int xen_swiotlb; 5extern int xen_swiotlb;
6extern int __init pci_xen_swiotlb_detect(void); 6extern int __init pci_xen_swiotlb_detect(void);
7extern void __init pci_xen_swiotlb_init(void); 7extern void __init pci_xen_swiotlb_init(void);
8extern int pci_xen_swiotlb_init_late(void);
9#else 8#else
10#define xen_swiotlb (0) 9#define xen_swiotlb (0)
11static inline int __init pci_xen_swiotlb_detect(void) { return 0; } 10static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
12static inline void __init pci_xen_swiotlb_init(void) { } 11static inline void __init pci_xen_swiotlb_init(void) { }
13static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
14#endif 12#endif
15 13
16#endif /* _ASM_X86_SWIOTLB_XEN_H */ 14#endif /* _ASM_X86_SWIOTLB_XEN_H */
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index f8fde90bc45..7fcf6f3dbcc 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -3,8 +3,8 @@
3# include <asm-generic/xor.h> 3# include <asm-generic/xor.h>
4#else 4#else
5#ifdef CONFIG_X86_32 5#ifdef CONFIG_X86_32
6# include <asm/xor_32.h> 6# include "xor_32.h"
7#else 7#else
8# include <asm/xor_64.h> 8# include "xor_64.h"
9#endif 9#endif
10#endif 10#endif
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index f79cb7ec0e0..133b40a0f49 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -534,6 +534,38 @@ static struct xor_block_template xor_block_p5_mmx = {
534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) 534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
535 */ 535 */
536 536
537#define XMMS_SAVE \
538do { \
539 preempt_disable(); \
540 cr0 = read_cr0(); \
541 clts(); \
542 asm volatile( \
543 "movups %%xmm0,(%0) ;\n\t" \
544 "movups %%xmm1,0x10(%0) ;\n\t" \
545 "movups %%xmm2,0x20(%0) ;\n\t" \
546 "movups %%xmm3,0x30(%0) ;\n\t" \
547 : \
548 : "r" (xmm_save) \
549 : "memory"); \
550} while (0)
551
552#define XMMS_RESTORE \
553do { \
554 asm volatile( \
555 "sfence ;\n\t" \
556 "movups (%0),%%xmm0 ;\n\t" \
557 "movups 0x10(%0),%%xmm1 ;\n\t" \
558 "movups 0x20(%0),%%xmm2 ;\n\t" \
559 "movups 0x30(%0),%%xmm3 ;\n\t" \
560 : \
561 : "r" (xmm_save) \
562 : "memory"); \
563 write_cr0(cr0); \
564 preempt_enable(); \
565} while (0)
566
567#define ALIGN16 __attribute__((aligned(16)))
568
537#define OFFS(x) "16*("#x")" 569#define OFFS(x) "16*("#x")"
538#define PF_OFFS(x) "256+16*("#x")" 570#define PF_OFFS(x) "256+16*("#x")"
539#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" 571#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
@@ -555,8 +587,10 @@ static void
555xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 587xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
556{ 588{
557 unsigned long lines = bytes >> 8; 589 unsigned long lines = bytes >> 8;
590 char xmm_save[16*4] ALIGN16;
591 int cr0;
558 592
559 kernel_fpu_begin(); 593 XMMS_SAVE;
560 594
561 asm volatile( 595 asm volatile(
562#undef BLOCK 596#undef BLOCK
@@ -599,7 +633,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
599 : 633 :
600 : "memory"); 634 : "memory");
601 635
602 kernel_fpu_end(); 636 XMMS_RESTORE;
603} 637}
604 638
605static void 639static void
@@ -607,8 +641,10 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
607 unsigned long *p3) 641 unsigned long *p3)
608{ 642{
609 unsigned long lines = bytes >> 8; 643 unsigned long lines = bytes >> 8;
644 char xmm_save[16*4] ALIGN16;
645 int cr0;
610 646
611 kernel_fpu_begin(); 647 XMMS_SAVE;
612 648
613 asm volatile( 649 asm volatile(
614#undef BLOCK 650#undef BLOCK
@@ -658,7 +694,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
658 : 694 :
659 : "memory" ); 695 : "memory" );
660 696
661 kernel_fpu_end(); 697 XMMS_RESTORE;
662} 698}
663 699
664static void 700static void
@@ -666,8 +702,10 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
666 unsigned long *p3, unsigned long *p4) 702 unsigned long *p3, unsigned long *p4)
667{ 703{
668 unsigned long lines = bytes >> 8; 704 unsigned long lines = bytes >> 8;
705 char xmm_save[16*4] ALIGN16;
706 int cr0;
669 707
670 kernel_fpu_begin(); 708 XMMS_SAVE;
671 709
672 asm volatile( 710 asm volatile(
673#undef BLOCK 711#undef BLOCK
@@ -724,7 +762,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
724 : 762 :
725 : "memory" ); 763 : "memory" );
726 764
727 kernel_fpu_end(); 765 XMMS_RESTORE;
728} 766}
729 767
730static void 768static void
@@ -732,8 +770,10 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
732 unsigned long *p3, unsigned long *p4, unsigned long *p5) 770 unsigned long *p3, unsigned long *p4, unsigned long *p5)
733{ 771{
734 unsigned long lines = bytes >> 8; 772 unsigned long lines = bytes >> 8;
773 char xmm_save[16*4] ALIGN16;
774 int cr0;
735 775
736 kernel_fpu_begin(); 776 XMMS_SAVE;
737 777
738 /* Make sure GCC forgets anything it knows about p4 or p5, 778 /* Make sure GCC forgets anything it knows about p4 or p5,
739 such that it won't pass to the asm volatile below a 779 such that it won't pass to the asm volatile below a
@@ -810,7 +850,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
810 like assuming they have some legal value. */ 850 like assuming they have some legal value. */
811 asm("" : "=r" (p4), "=r" (p5)); 851 asm("" : "=r" (p4), "=r" (p5));
812 852
813 kernel_fpu_end(); 853 XMMS_RESTORE;
814} 854}
815 855
816static struct xor_block_template xor_block_pIII_sse = { 856static struct xor_block_template xor_block_pIII_sse = {
@@ -821,9 +861,6 @@ static struct xor_block_template xor_block_pIII_sse = {
821 .do_5 = xor_sse_5, 861 .do_5 = xor_sse_5,
822}; 862};
823 863
824/* Also try the AVX routines */
825#include <asm/xor_avx.h>
826
827/* Also try the generic routines. */ 864/* Also try the generic routines. */
828#include <asm-generic/xor.h> 865#include <asm-generic/xor.h>
829 866
@@ -834,7 +871,6 @@ do { \
834 xor_speed(&xor_block_8regs_p); \ 871 xor_speed(&xor_block_8regs_p); \
835 xor_speed(&xor_block_32regs); \ 872 xor_speed(&xor_block_32regs); \
836 xor_speed(&xor_block_32regs_p); \ 873 xor_speed(&xor_block_32regs_p); \
837 AVX_XOR_SPEED; \
838 if (cpu_has_xmm) \ 874 if (cpu_has_xmm) \
839 xor_speed(&xor_block_pIII_sse); \ 875 xor_speed(&xor_block_pIII_sse); \
840 if (cpu_has_mmx) { \ 876 if (cpu_has_mmx) { \
@@ -847,6 +883,6 @@ do { \
847 We may also be able to load into the L1 only depending on how the cpu 883 We may also be able to load into the L1 only depending on how the cpu
848 deals with a load to a line that is being prefetched. */ 884 deals with a load to a line that is being prefetched. */
849#define XOR_SELECT_TEMPLATE(FASTEST) \ 885#define XOR_SELECT_TEMPLATE(FASTEST) \
850 AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) 886 (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
851 887
852#endif /* _ASM_X86_XOR_32_H */ 888#endif /* _ASM_X86_XOR_32_H */
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index 87ac522c4af..1549b5e261f 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -34,7 +34,41 @@
34 * no advantages to be gotten from x86-64 here anyways. 34 * no advantages to be gotten from x86-64 here anyways.
35 */ 35 */
36 36
37#include <asm/i387.h> 37typedef struct {
38 unsigned long a, b;
39} __attribute__((aligned(16))) xmm_store_t;
40
41/* Doesn't use gcc to save the XMM registers, because there is no easy way to
42 tell it to do a clts before the register saving. */
43#define XMMS_SAVE \
44do { \
45 preempt_disable(); \
46 asm volatile( \
47 "movq %%cr0,%0 ;\n\t" \
48 "clts ;\n\t" \
49 "movups %%xmm0,(%1) ;\n\t" \
50 "movups %%xmm1,0x10(%1) ;\n\t" \
51 "movups %%xmm2,0x20(%1) ;\n\t" \
52 "movups %%xmm3,0x30(%1) ;\n\t" \
53 : "=&r" (cr0) \
54 : "r" (xmm_save) \
55 : "memory"); \
56} while (0)
57
58#define XMMS_RESTORE \
59do { \
60 asm volatile( \
61 "sfence ;\n\t" \
62 "movups (%1),%%xmm0 ;\n\t" \
63 "movups 0x10(%1),%%xmm1 ;\n\t" \
64 "movups 0x20(%1),%%xmm2 ;\n\t" \
65 "movups 0x30(%1),%%xmm3 ;\n\t" \
66 "movq %0,%%cr0 ;\n\t" \
67 : \
68 : "r" (cr0), "r" (xmm_save) \
69 : "memory"); \
70 preempt_enable(); \
71} while (0)
38 72
39#define OFFS(x) "16*("#x")" 73#define OFFS(x) "16*("#x")"
40#define PF_OFFS(x) "256+16*("#x")" 74#define PF_OFFS(x) "256+16*("#x")"
@@ -57,8 +91,10 @@ static void
57xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 91xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
58{ 92{
59 unsigned int lines = bytes >> 8; 93 unsigned int lines = bytes >> 8;
94 unsigned long cr0;
95 xmm_store_t xmm_save[4];
60 96
61 kernel_fpu_begin(); 97 XMMS_SAVE;
62 98
63 asm volatile( 99 asm volatile(
64#undef BLOCK 100#undef BLOCK
@@ -99,7 +135,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
99 : [inc] "r" (256UL) 135 : [inc] "r" (256UL)
100 : "memory"); 136 : "memory");
101 137
102 kernel_fpu_end(); 138 XMMS_RESTORE;
103} 139}
104 140
105static void 141static void
@@ -107,8 +143,11 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
107 unsigned long *p3) 143 unsigned long *p3)
108{ 144{
109 unsigned int lines = bytes >> 8; 145 unsigned int lines = bytes >> 8;
146 xmm_store_t xmm_save[4];
147 unsigned long cr0;
148
149 XMMS_SAVE;
110 150
111 kernel_fpu_begin();
112 asm volatile( 151 asm volatile(
113#undef BLOCK 152#undef BLOCK
114#define BLOCK(i) \ 153#define BLOCK(i) \
@@ -155,7 +194,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
155 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) 194 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
156 : [inc] "r" (256UL) 195 : [inc] "r" (256UL)
157 : "memory"); 196 : "memory");
158 kernel_fpu_end(); 197 XMMS_RESTORE;
159} 198}
160 199
161static void 200static void
@@ -163,8 +202,10 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
163 unsigned long *p3, unsigned long *p4) 202 unsigned long *p3, unsigned long *p4)
164{ 203{
165 unsigned int lines = bytes >> 8; 204 unsigned int lines = bytes >> 8;
205 xmm_store_t xmm_save[4];
206 unsigned long cr0;
166 207
167 kernel_fpu_begin(); 208 XMMS_SAVE;
168 209
169 asm volatile( 210 asm volatile(
170#undef BLOCK 211#undef BLOCK
@@ -220,7 +261,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
220 : [inc] "r" (256UL) 261 : [inc] "r" (256UL)
221 : "memory" ); 262 : "memory" );
222 263
223 kernel_fpu_end(); 264 XMMS_RESTORE;
224} 265}
225 266
226static void 267static void
@@ -228,8 +269,10 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
228 unsigned long *p3, unsigned long *p4, unsigned long *p5) 269 unsigned long *p3, unsigned long *p4, unsigned long *p5)
229{ 270{
230 unsigned int lines = bytes >> 8; 271 unsigned int lines = bytes >> 8;
272 xmm_store_t xmm_save[4];
273 unsigned long cr0;
231 274
232 kernel_fpu_begin(); 275 XMMS_SAVE;
233 276
234 asm volatile( 277 asm volatile(
235#undef BLOCK 278#undef BLOCK
@@ -293,7 +336,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
293 : [inc] "r" (256UL) 336 : [inc] "r" (256UL)
294 : "memory"); 337 : "memory");
295 338
296 kernel_fpu_end(); 339 XMMS_RESTORE;
297} 340}
298 341
299static struct xor_block_template xor_block_sse = { 342static struct xor_block_template xor_block_sse = {
@@ -304,21 +347,15 @@ static struct xor_block_template xor_block_sse = {
304 .do_5 = xor_sse_5, 347 .do_5 = xor_sse_5,
305}; 348};
306 349
307
308/* Also try the AVX routines */
309#include <asm/xor_avx.h>
310
311#undef XOR_TRY_TEMPLATES 350#undef XOR_TRY_TEMPLATES
312#define XOR_TRY_TEMPLATES \ 351#define XOR_TRY_TEMPLATES \
313do { \ 352do { \
314 AVX_XOR_SPEED; \
315 xor_speed(&xor_block_sse); \ 353 xor_speed(&xor_block_sse); \
316} while (0) 354} while (0)
317 355
318/* We force the use of the SSE xor block because it can write around L2. 356/* We force the use of the SSE xor block because it can write around L2.
319 We may also be able to load into the L1 only depending on how the cpu 357 We may also be able to load into the L1 only depending on how the cpu
320 deals with a load to a line that is being prefetched. */ 358 deals with a load to a line that is being prefetched. */
321#define XOR_SELECT_TEMPLATE(FASTEST) \ 359#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
322 AVX_SELECT(&xor_block_sse)
323 360
324#endif /* _ASM_X86_XOR_64_H */ 361#endif /* _ASM_X86_XOR_64_H */
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
deleted file mode 100644
index 7ea79c5fa1f..00000000000
--- a/arch/x86/include/asm/xor_avx.h
+++ /dev/null
@@ -1,184 +0,0 @@
1#ifndef _ASM_X86_XOR_AVX_H
2#define _ASM_X86_XOR_AVX_H
3
4/*
5 * Optimized RAID-5 checksumming functions for AVX
6 *
7 * Copyright (C) 2012 Intel Corporation
8 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
9 *
10 * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; version 2
15 * of the License.
16 */
17
18#ifdef CONFIG_AS_AVX
19
20#include <linux/compiler.h>
21#include <asm/i387.h>
22
23#define BLOCK4(i) \
24 BLOCK(32 * i, 0) \
25 BLOCK(32 * (i + 1), 1) \
26 BLOCK(32 * (i + 2), 2) \
27 BLOCK(32 * (i + 3), 3)
28
29#define BLOCK16() \
30 BLOCK4(0) \
31 BLOCK4(4) \
32 BLOCK4(8) \
33 BLOCK4(12)
34
35static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
36{
37 unsigned long lines = bytes >> 9;
38
39 kernel_fpu_begin();
40
41 while (lines--) {
42#undef BLOCK
43#define BLOCK(i, reg) \
44do { \
45 asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
46 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
47 "m" (p0[i / sizeof(*p0)])); \
48 asm volatile("vmovdqa %%ymm" #reg ", %0" : \
49 "=m" (p0[i / sizeof(*p0)])); \
50} while (0);
51
52 BLOCK16()
53
54 p0 = (unsigned long *)((uintptr_t)p0 + 512);
55 p1 = (unsigned long *)((uintptr_t)p1 + 512);
56 }
57
58 kernel_fpu_end();
59}
60
61static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
62 unsigned long *p2)
63{
64 unsigned long lines = bytes >> 9;
65
66 kernel_fpu_begin();
67
68 while (lines--) {
69#undef BLOCK
70#define BLOCK(i, reg) \
71do { \
72 asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
73 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
74 "m" (p1[i / sizeof(*p1)])); \
75 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
76 "m" (p0[i / sizeof(*p0)])); \
77 asm volatile("vmovdqa %%ymm" #reg ", %0" : \
78 "=m" (p0[i / sizeof(*p0)])); \
79} while (0);
80
81 BLOCK16()
82
83 p0 = (unsigned long *)((uintptr_t)p0 + 512);
84 p1 = (unsigned long *)((uintptr_t)p1 + 512);
85 p2 = (unsigned long *)((uintptr_t)p2 + 512);
86 }
87
88 kernel_fpu_end();
89}
90
91static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
92 unsigned long *p2, unsigned long *p3)
93{
94 unsigned long lines = bytes >> 9;
95
96 kernel_fpu_begin();
97
98 while (lines--) {
99#undef BLOCK
100#define BLOCK(i, reg) \
101do { \
102 asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
103 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
104 "m" (p2[i / sizeof(*p2)])); \
105 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
106 "m" (p1[i / sizeof(*p1)])); \
107 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
108 "m" (p0[i / sizeof(*p0)])); \
109 asm volatile("vmovdqa %%ymm" #reg ", %0" : \
110 "=m" (p0[i / sizeof(*p0)])); \
111} while (0);
112
113 BLOCK16();
114
115 p0 = (unsigned long *)((uintptr_t)p0 + 512);
116 p1 = (unsigned long *)((uintptr_t)p1 + 512);
117 p2 = (unsigned long *)((uintptr_t)p2 + 512);
118 p3 = (unsigned long *)((uintptr_t)p3 + 512);
119 }
120
121 kernel_fpu_end();
122}
123
124static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
125 unsigned long *p2, unsigned long *p3, unsigned long *p4)
126{
127 unsigned long lines = bytes >> 9;
128
129 kernel_fpu_begin();
130
131 while (lines--) {
132#undef BLOCK
133#define BLOCK(i, reg) \
134do { \
135 asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
136 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
137 "m" (p3[i / sizeof(*p3)])); \
138 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
139 "m" (p2[i / sizeof(*p2)])); \
140 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
141 "m" (p1[i / sizeof(*p1)])); \
142 asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
143 "m" (p0[i / sizeof(*p0)])); \
144 asm volatile("vmovdqa %%ymm" #reg ", %0" : \
145 "=m" (p0[i / sizeof(*p0)])); \
146} while (0);
147
148 BLOCK16()
149
150 p0 = (unsigned long *)((uintptr_t)p0 + 512);
151 p1 = (unsigned long *)((uintptr_t)p1 + 512);
152 p2 = (unsigned long *)((uintptr_t)p2 + 512);
153 p3 = (unsigned long *)((uintptr_t)p3 + 512);
154 p4 = (unsigned long *)((uintptr_t)p4 + 512);
155 }
156
157 kernel_fpu_end();
158}
159
160static struct xor_block_template xor_block_avx = {
161 .name = "avx",
162 .do_2 = xor_avx_2,
163 .do_3 = xor_avx_3,
164 .do_4 = xor_avx_4,
165 .do_5 = xor_avx_5,
166};
167
168#define AVX_XOR_SPEED \
169do { \
170 if (cpu_has_avx) \
171 xor_speed(&xor_block_avx); \
172} while (0)
173
174#define AVX_SELECT(FASTEST) \
175 (cpu_has_avx ? &xor_block_avx : FASTEST)
176
177#else
178
179#define AVX_XOR_SPEED {}
180
181#define AVX_SELECT(FASTEST) (FASTEST)
182
183#endif
184#endif
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0415cdabb5a..c6ce2452f10 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -34,14 +34,17 @@
34extern unsigned int xstate_size; 34extern unsigned int xstate_size;
35extern u64 pcntxt_mask; 35extern u64 pcntxt_mask;
36extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; 36extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
37extern struct xsave_struct *init_xstate_buf;
38 37
39extern void xsave_init(void); 38extern void xsave_init(void);
40extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); 39extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
41extern int init_fpu(struct task_struct *child); 40extern int init_fpu(struct task_struct *child);
41extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
42 void __user *fpstate,
43 struct _fpx_sw_bytes *sw);
42 44
43static inline int fpu_xrstor_checking(struct xsave_struct *fx) 45static inline int fpu_xrstor_checking(struct fpu *fpu)
44{ 46{
47 struct xsave_struct *fx = &fpu->state->xsave;
45 int err; 48 int err;
46 49
47 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" 50 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -66,21 +69,27 @@ static inline int xsave_user(struct xsave_struct __user *buf)
66 * Clear the xsave header first, so that reserved fields are 69 * Clear the xsave header first, so that reserved fields are
67 * initialized to zero. 70 * initialized to zero.
68 */ 71 */
69 err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr)); 72 err = __clear_user(&buf->xsave_hdr,
73 sizeof(struct xsave_hdr_struct));
70 if (unlikely(err)) 74 if (unlikely(err))
71 return -EFAULT; 75 return -EFAULT;
72 76
73 __asm__ __volatile__(ASM_STAC "\n" 77 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
74 "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" 78 "2:\n"
75 "2: " ASM_CLAC "\n"
76 ".section .fixup,\"ax\"\n" 79 ".section .fixup,\"ax\"\n"
77 "3: movl $-1,%[err]\n" 80 "3: movl $-1,%[err]\n"
78 " jmp 2b\n" 81 " jmp 2b\n"
79 ".previous\n" 82 ".previous\n"
80 _ASM_EXTABLE(1b,3b) 83 ".section __ex_table,\"a\"\n"
84 _ASM_ALIGN "\n"
85 _ASM_PTR "1b,3b\n"
86 ".previous"
81 : [err] "=r" (err) 87 : [err] "=r" (err)
82 : "D" (buf), "a" (-1), "d" (-1), "0" (0) 88 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
83 : "memory"); 89 : "memory");
90 if (unlikely(err) && __clear_user(buf, xstate_size))
91 err = -EFAULT;
92 /* No need to clear here because the caller clears USED_MATH */
84 return err; 93 return err;
85} 94}
86 95
@@ -91,14 +100,16 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
91 u32 lmask = mask; 100 u32 lmask = mask;
92 u32 hmask = mask >> 32; 101 u32 hmask = mask >> 32;
93 102
94 __asm__ __volatile__(ASM_STAC "\n" 103 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
95 "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" 104 "2:\n"
96 "2: " ASM_CLAC "\n"
97 ".section .fixup,\"ax\"\n" 105 ".section .fixup,\"ax\"\n"
98 "3: movl $-1,%[err]\n" 106 "3: movl $-1,%[err]\n"
99 " jmp 2b\n" 107 " jmp 2b\n"
100 ".previous\n" 108 ".previous\n"
101 _ASM_EXTABLE(1b,3b) 109 ".section __ex_table,\"a\"\n"
110 _ASM_ALIGN "\n"
111 _ASM_PTR "1b,3b\n"
112 ".previous"
102 : [err] "=r" (err) 113 : [err] "=r" (err)
103 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) 114 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
104 : "memory"); /* memory required? */ 115 : "memory"); /* memory required? */
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
deleted file mode 100644
index 09409c44f9a..00000000000
--- a/arch/x86/include/uapi/asm/Kbuild
+++ /dev/null
@@ -1,64 +0,0 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3
4genhdr-y += unistd_32.h
5genhdr-y += unistd_64.h
6genhdr-y += unistd_x32.h
7header-y += a.out.h
8header-y += auxvec.h
9header-y += bitsperlong.h
10header-y += boot.h
11header-y += bootparam.h
12header-y += byteorder.h
13header-y += debugreg.h
14header-y += e820.h
15header-y += errno.h
16header-y += fcntl.h
17header-y += hw_breakpoint.h
18header-y += hyperv.h
19header-y += ioctl.h
20header-y += ioctls.h
21header-y += ipcbuf.h
22header-y += ist.h
23header-y += kvm.h
24header-y += kvm_para.h
25header-y += ldt.h
26header-y += mce.h
27header-y += mman.h
28header-y += msgbuf.h
29header-y += msr-index.h
30header-y += msr.h
31header-y += mtrr.h
32header-y += param.h
33header-y += perf_regs.h
34header-y += poll.h
35header-y += posix_types.h
36header-y += posix_types_32.h
37header-y += posix_types_64.h
38header-y += posix_types_x32.h
39header-y += prctl.h
40header-y += processor-flags.h
41header-y += ptrace-abi.h
42header-y += ptrace.h
43header-y += resource.h
44header-y += sembuf.h
45header-y += setup.h
46header-y += shmbuf.h
47header-y += sigcontext.h
48header-y += sigcontext32.h
49header-y += siginfo.h
50header-y += signal.h
51header-y += socket.h
52header-y += sockios.h
53header-y += stat.h
54header-y += statfs.h
55header-y += svm.h
56header-y += swab.h
57header-y += termbits.h
58header-y += termios.h
59header-y += types.h
60header-y += ucontext.h
61header-y += unistd.h
62header-y += vm86.h
63header-y += vmx.h
64header-y += vsyscall.h
diff --git a/arch/x86/include/uapi/asm/a.out.h b/arch/x86/include/uapi/asm/a.out.h
deleted file mode 100644
index 4684f97a5bb..00000000000
--- a/arch/x86/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_X86_A_OUT_H
2#define _ASM_X86_A_OUT_H
3
4struct exec
5{
6 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
7 unsigned a_text; /* length of text, in bytes */
8 unsigned a_data; /* length of data, in bytes */
9 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
10 unsigned a_syms; /* length of symbol table data in file, in bytes */
11 unsigned a_entry; /* start address */
12 unsigned a_trsize; /* length of relocation info for text, in bytes */
13 unsigned a_drsize; /* length of relocation info for data, in bytes */
14};
15
16#define N_TRSIZE(a) ((a).a_trsize)
17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms)
19
20#endif /* _ASM_X86_A_OUT_H */
diff --git a/arch/x86/include/uapi/asm/auxvec.h b/arch/x86/include/uapi/asm/auxvec.h
deleted file mode 100644
index 77203ac352d..00000000000
--- a/arch/x86/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_X86_AUXVEC_H
2#define _ASM_X86_AUXVEC_H
3/*
4 * Architecture-neutral AT_ values in 0-17, leave some room
5 * for more of them, start the x86-specific ones at 32.
6 */
7#ifdef __i386__
8#define AT_SYSINFO 32
9#endif
10#define AT_SYSINFO_EHDR 33
11
12/* entries in ARCH_DLINFO: */
13#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
14# define AT_VECTOR_SIZE_ARCH 2
15#else /* else it's non-compat x86-64 */
16# define AT_VECTOR_SIZE_ARCH 1
17#endif
18
19#endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index b0ae1c4dc79..00000000000
--- a/arch/x86/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_X86_BITSPERLONG_H
2#define __ASM_X86_BITSPERLONG_H
3
4#ifdef __x86_64__
5# define __BITS_PER_LONG 64
6#else
7# define __BITS_PER_LONG 32
8#endif
9
10#include <asm-generic/bitsperlong.h>
11
12#endif /* __ASM_X86_BITSPERLONG_H */
13
diff --git a/arch/x86/include/uapi/asm/boot.h b/arch/x86/include/uapi/asm/boot.h
deleted file mode 100644
index 94292c4c812..00000000000
--- a/arch/x86/include/uapi/asm/boot.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _UAPI_ASM_X86_BOOT_H
2#define _UAPI_ASM_X86_BOOT_H
3
4/* Internal svga startup constants */
5#define NORMAL_VGA 0xffff /* 80x25 mode */
6#define EXTENDED_VGA 0xfffe /* 80x50 mode */
7#define ASK_VGA 0xfffd /* ask for it at bootup */
8
9
10#endif /* _UAPI_ASM_X86_BOOT_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
deleted file mode 100644
index 92862cd9020..00000000000
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ /dev/null
@@ -1,139 +0,0 @@
1#ifndef _ASM_X86_BOOTPARAM_H
2#define _ASM_X86_BOOTPARAM_H
3
4#include <linux/types.h>
5#include <linux/screen_info.h>
6#include <linux/apm_bios.h>
7#include <linux/edd.h>
8#include <asm/e820.h>
9#include <asm/ist.h>
10#include <video/edid.h>
11
12/* setup data types */
13#define SETUP_NONE 0
14#define SETUP_E820_EXT 1
15#define SETUP_DTB 2
16#define SETUP_PCI 3
17
18/* extensible setup data list node */
19struct setup_data {
20 __u64 next;
21 __u32 type;
22 __u32 len;
23 __u8 data[0];
24};
25
26struct setup_header {
27 __u8 setup_sects;
28 __u16 root_flags;
29 __u32 syssize;
30 __u16 ram_size;
31#define RAMDISK_IMAGE_START_MASK 0x07FF
32#define RAMDISK_PROMPT_FLAG 0x8000
33#define RAMDISK_LOAD_FLAG 0x4000
34 __u16 vid_mode;
35 __u16 root_dev;
36 __u16 boot_flag;
37 __u16 jump;
38 __u32 header;
39 __u16 version;
40 __u32 realmode_swtch;
41 __u16 start_sys;
42 __u16 kernel_version;
43 __u8 type_of_loader;
44 __u8 loadflags;
45#define LOADED_HIGH (1<<0)
46#define QUIET_FLAG (1<<5)
47#define KEEP_SEGMENTS (1<<6)
48#define CAN_USE_HEAP (1<<7)
49 __u16 setup_move_size;
50 __u32 code32_start;
51 __u32 ramdisk_image;
52 __u32 ramdisk_size;
53 __u32 bootsect_kludge;
54 __u16 heap_end_ptr;
55 __u8 ext_loader_ver;
56 __u8 ext_loader_type;
57 __u32 cmd_line_ptr;
58 __u32 initrd_addr_max;
59 __u32 kernel_alignment;
60 __u8 relocatable_kernel;
61 __u8 _pad2[3];
62 __u32 cmdline_size;
63 __u32 hardware_subarch;
64 __u64 hardware_subarch_data;
65 __u32 payload_offset;
66 __u32 payload_length;
67 __u64 setup_data;
68 __u64 pref_address;
69 __u32 init_size;
70 __u32 handover_offset;
71} __attribute__((packed));
72
73struct sys_desc_table {
74 __u16 length;
75 __u8 table[14];
76};
77
78/* Gleaned from OFW's set-parameters in cpu/x86/pc/linux.fth */
79struct olpc_ofw_header {
80 __u32 ofw_magic; /* OFW signature */
81 __u32 ofw_version;
82 __u32 cif_handler; /* callback into OFW */
83 __u32 irq_desc_table;
84} __attribute__((packed));
85
86struct efi_info {
87 __u32 efi_loader_signature;
88 __u32 efi_systab;
89 __u32 efi_memdesc_size;
90 __u32 efi_memdesc_version;
91 __u32 efi_memmap;
92 __u32 efi_memmap_size;
93 __u32 efi_systab_hi;
94 __u32 efi_memmap_hi;
95};
96
97/* The so-called "zeropage" */
98struct boot_params {
99 struct screen_info screen_info; /* 0x000 */
100 struct apm_bios_info apm_bios_info; /* 0x040 */
101 __u8 _pad2[4]; /* 0x054 */
102 __u64 tboot_addr; /* 0x058 */
103 struct ist_info ist_info; /* 0x060 */
104 __u8 _pad3[16]; /* 0x070 */
105 __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
106 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
107 struct sys_desc_table sys_desc_table; /* 0x0a0 */
108 struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */
109 __u8 _pad4[128]; /* 0x0c0 */
110 struct edid_info edid_info; /* 0x140 */
111 struct efi_info efi_info; /* 0x1c0 */
112 __u32 alt_mem_k; /* 0x1e0 */
113 __u32 scratch; /* Scratch field! */ /* 0x1e4 */
114 __u8 e820_entries; /* 0x1e8 */
115 __u8 eddbuf_entries; /* 0x1e9 */
116 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */
117 __u8 kbd_status; /* 0x1eb */
118 __u8 _pad6[5]; /* 0x1ec */
119 struct setup_header hdr; /* setup header */ /* 0x1f1 */
120 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
121 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
122 struct e820entry e820_map[E820MAX]; /* 0x2d0 */
123 __u8 _pad8[48]; /* 0xcd0 */
124 struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
125 __u8 _pad9[276]; /* 0xeec */
126} __attribute__((packed));
127
128enum {
129 X86_SUBARCH_PC = 0,
130 X86_SUBARCH_LGUEST,
131 X86_SUBARCH_XEN,
132 X86_SUBARCH_MRST,
133 X86_SUBARCH_CE4100,
134 X86_NR_SUBARCHS,
135};
136
137
138
139#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h
deleted file mode 100644
index b13a7a88f3e..00000000000
--- a/arch/x86/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_X86_BYTEORDER_H
2#define _ASM_X86_BYTEORDER_H
3
4#include <linux/byteorder/little_endian.h>
5
6#endif /* _ASM_X86_BYTEORDER_H */
diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h
deleted file mode 100644
index 3c0874dd986..00000000000
--- a/arch/x86/include/uapi/asm/debugreg.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef _UAPI_ASM_X86_DEBUGREG_H
2#define _UAPI_ASM_X86_DEBUGREG_H
3
4
5/* Indicate the register numbers for a number of the specific
6 debug registers. Registers 0-3 contain the addresses we wish to trap on */
7#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
8#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
9
10#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
11#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
12
13/* Define a few things for the status register. We can use this to determine
14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */
16
17/* Define reserved bits in DR6 which are always set to 1 */
18#define DR6_RESERVED (0xFFFF0FF0)
19
20#define DR_TRAP0 (0x1) /* db0 */
21#define DR_TRAP1 (0x2) /* db1 */
22#define DR_TRAP2 (0x4) /* db2 */
23#define DR_TRAP3 (0x8) /* db3 */
24#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
25
26#define DR_STEP (0x4000) /* single-step */
27#define DR_SWITCH (0x8000) /* task switch */
28
29/* Now define a bunch of things for manipulating the control register.
30 The top two bytes of the control register consist of 4 fields of 4
31 bits - each field corresponds to one of the four debug registers,
32 and indicates what types of access we trap on, and how large the data
33 field is that we are looking at */
34
35#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
36#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
37
38#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
39#define DR_RW_WRITE (0x1)
40#define DR_RW_READ (0x3)
41
42#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
43#define DR_LEN_2 (0x4)
44#define DR_LEN_4 (0xC)
45#define DR_LEN_8 (0x8)
46
47/* The low byte to the control register determine which registers are
48 enabled. There are 4 fields of two bits. One bit is "local", meaning
49 that the processor will reset the bit after a task switch and the other
50 is global meaning that we have to explicitly reset the bit. With linux,
51 you can use either one, since we explicitly zero the register when we enter
52 kernel mode. */
53
54#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
55#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
56#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
57#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
58#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
59
60#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
61#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
62
63/* The second byte to the control register has a few special things.
64 We can slow the instruction pipeline for instructions coming via the
65 gdt or the ldt if we want to. I am not sure why this is an advantage */
66
67#ifdef __i386__
68#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
69#else
70#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
71#endif
72
73#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
74#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
75
76/*
77 * HW breakpoint additions
78 */
79
80#endif /* _UAPI_ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
deleted file mode 100644
index bbae0247070..00000000000
--- a/arch/x86/include/uapi/asm/e820.h
+++ /dev/null
@@ -1,75 +0,0 @@
1#ifndef _UAPI_ASM_X86_E820_H
2#define _UAPI_ASM_X86_E820_H
3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */
5
6/*
7 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
8 * constrained space in the zeropage. If we have more nodes than
9 * that, and if we've booted off EFI firmware, then the EFI tables
10 * passed us from the EFI firmware can list more nodes. Size our
11 * internal memory map tables to have room for these additional
12 * nodes, based on up to three entries per node for which the
13 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
14 * plus E820MAX, allowing space for the possible duplicate E820
15 * entries that might need room in the same arrays, prior to the
16 * call to sanitize_e820_map() to remove duplicates. The allowance
17 * of three memory map entries per node is "enough" entries for
18 * the initial hardware platform motivating this mechanism to make
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
21 * this size.
22 */
23
24/*
25 * Odd: 'make headers_check' complains about numa.h if I try
26 * to collapse the next two #ifdef lines to a single line:
27 * #if defined(__KERNEL__) && defined(CONFIG_EFI)
28 */
29#ifndef __KERNEL__
30#define E820_X_MAX E820MAX
31#endif
32
33#define E820NR 0x1e8 /* # entries in E820MAP */
34
35#define E820_RAM 1
36#define E820_RESERVED 2
37#define E820_ACPI 3
38#define E820_NVS 4
39#define E820_UNUSABLE 5
40
41
42/*
43 * reserved RAM used by kernel itself
44 * if CONFIG_INTEL_TXT is enabled, memory of this type will be
45 * included in the S3 integrity calculation and so should not include
46 * any memory that BIOS might alter over the S3 transition
47 */
48#define E820_RESERVED_KERN 128
49
50#ifndef __ASSEMBLY__
51#include <linux/types.h>
52struct e820entry {
53 __u64 addr; /* start of memory segment */
54 __u64 size; /* size of memory segment */
55 __u32 type; /* type of memory segment */
56} __attribute__((packed));
57
58struct e820map {
59 __u32 nr_map;
60 struct e820entry map[E820_X_MAX];
61};
62
63#define ISA_START_ADDRESS 0xa0000
64#define ISA_END_ADDRESS 0x100000
65
66#define BIOS_BEGIN 0x000a0000
67#define BIOS_END 0x00100000
68
69#define BIOS_ROM_BASE 0xffe00000
70#define BIOS_ROM_END 0xffffffff
71
72#endif /* __ASSEMBLY__ */
73
74
75#endif /* _UAPI_ASM_X86_E820_H */
diff --git a/arch/x86/include/uapi/asm/errno.h b/arch/x86/include/uapi/asm/errno.h
deleted file mode 100644
index 4c82b503d92..00000000000
--- a/arch/x86/include/uapi/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/errno.h>
diff --git a/arch/x86/include/uapi/asm/fcntl.h b/arch/x86/include/uapi/asm/fcntl.h
deleted file mode 100644
index 46ab12db573..00000000000
--- a/arch/x86/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/arch/x86/include/uapi/asm/hw_breakpoint.h b/arch/x86/include/uapi/asm/hw_breakpoint.h
deleted file mode 100644
index 79a9626b550..00000000000
--- a/arch/x86/include/uapi/asm/hw_breakpoint.h
+++ /dev/null
@@ -1 +0,0 @@
1/* */
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
deleted file mode 100644
index b80420bcd09..00000000000
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ /dev/null
@@ -1,194 +0,0 @@
1#ifndef _ASM_X86_HYPERV_H
2#define _ASM_X86_HYPERV_H
3
4#include <linux/types.h>
5
6/*
7 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
8 * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
9 */
10#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
11#define HYPERV_CPUID_INTERFACE 0x40000001
12#define HYPERV_CPUID_VERSION 0x40000002
13#define HYPERV_CPUID_FEATURES 0x40000003
14#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
15#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
16
17#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
18#define HYPERV_CPUID_MIN 0x40000005
19#define HYPERV_CPUID_MAX 0x4000ffff
20
21/*
22 * Feature identification. EAX indicates which features are available
23 * to the partition based upon the current partition privileges.
24 */
25
26/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
27#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
28/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
29#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
30/*
31 * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
32 * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
33 */
34#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
35/*
36 * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
37 * HV_X64_MSR_STIMER3_COUNT) available
38 */
39#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
40/*
41 * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
42 * are available
43 */
44#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
45/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
46#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
47/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
48#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
49/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
50#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
51 /*
52 * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
53 * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
54 * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
55 */
56#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
57
58/*
59 * Feature identification: EBX indicates which flags were specified at
60 * partition creation. The format is the same as the partition creation
61 * flag structure defined in section Partition Creation Flags.
62 */
63#define HV_X64_CREATE_PARTITIONS (1 << 0)
64#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
65#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
66#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
67#define HV_X64_POST_MESSAGES (1 << 4)
68#define HV_X64_SIGNAL_EVENTS (1 << 5)
69#define HV_X64_CREATE_PORT (1 << 6)
70#define HV_X64_CONNECT_PORT (1 << 7)
71#define HV_X64_ACCESS_STATS (1 << 8)
72#define HV_X64_DEBUGGING (1 << 11)
73#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
74#define HV_X64_CONFIGURE_PROFILER (1 << 13)
75
76/*
77 * Feature identification. EDX indicates which miscellaneous features
78 * are available to the partition.
79 */
80/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
81#define HV_X64_MWAIT_AVAILABLE (1 << 0)
82/* Guest debugging support is available */
83#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
84/* Performance Monitor support is available*/
85#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
86/* Support for physical CPU dynamic partitioning events is available*/
87#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
88/*
89 * Support for passing hypercall input parameter block via XMM
90 * registers is available
91 */
92#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
93/* Support for a virtual guest idle state is available */
94#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
95
96/*
97 * Implementation recommendations. Indicates which behaviors the hypervisor
98 * recommends the OS implement for optimal performance.
99 */
100 /*
101 * Recommend using hypercall for address space switches rather
102 * than MOV to CR3 instruction
103 */
104#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
105/* Recommend using hypercall for local TLB flushes rather
106 * than INVLPG or MOV to CR3 instructions */
107#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
108/*
109 * Recommend using hypercall for remote TLB flushes rather
110 * than inter-processor interrupts
111 */
112#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
113/*
114 * Recommend using MSRs for accessing APIC registers
115 * EOI, ICR and TPR rather than their memory-mapped counterparts
116 */
117#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
118/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
119#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
120/*
121 * Recommend using relaxed timing for this partition. If used,
122 * the VM should disable any watchdog timeouts that rely on the
123 * timely delivery of external interrupts
124 */
125#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
126
127/* MSR used to identify the guest OS. */
128#define HV_X64_MSR_GUEST_OS_ID 0x40000000
129
130/* MSR used to setup pages used to communicate with the hypervisor. */
131#define HV_X64_MSR_HYPERCALL 0x40000001
132
133/* MSR used to provide vcpu index */
134#define HV_X64_MSR_VP_INDEX 0x40000002
135
136/* MSR used to read the per-partition time reference counter */
137#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
138
139/* Define the virtual APIC registers */
140#define HV_X64_MSR_EOI 0x40000070
141#define HV_X64_MSR_ICR 0x40000071
142#define HV_X64_MSR_TPR 0x40000072
143#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
144
145/* Define synthetic interrupt controller model specific registers. */
146#define HV_X64_MSR_SCONTROL 0x40000080
147#define HV_X64_MSR_SVERSION 0x40000081
148#define HV_X64_MSR_SIEFP 0x40000082
149#define HV_X64_MSR_SIMP 0x40000083
150#define HV_X64_MSR_EOM 0x40000084
151#define HV_X64_MSR_SINT0 0x40000090
152#define HV_X64_MSR_SINT1 0x40000091
153#define HV_X64_MSR_SINT2 0x40000092
154#define HV_X64_MSR_SINT3 0x40000093
155#define HV_X64_MSR_SINT4 0x40000094
156#define HV_X64_MSR_SINT5 0x40000095
157#define HV_X64_MSR_SINT6 0x40000096
158#define HV_X64_MSR_SINT7 0x40000097
159#define HV_X64_MSR_SINT8 0x40000098
160#define HV_X64_MSR_SINT9 0x40000099
161#define HV_X64_MSR_SINT10 0x4000009A
162#define HV_X64_MSR_SINT11 0x4000009B
163#define HV_X64_MSR_SINT12 0x4000009C
164#define HV_X64_MSR_SINT13 0x4000009D
165#define HV_X64_MSR_SINT14 0x4000009E
166#define HV_X64_MSR_SINT15 0x4000009F
167
168
169#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
170#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
171#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
172 (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
173
174/* Declare the various hypercall operations. */
175#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
176
177#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
178#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
179#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
180 (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
181
182#define HV_PROCESSOR_POWER_STATE_C0 0
183#define HV_PROCESSOR_POWER_STATE_C1 1
184#define HV_PROCESSOR_POWER_STATE_C2 2
185#define HV_PROCESSOR_POWER_STATE_C3 3
186
187/* hypercall status code */
188#define HV_STATUS_SUCCESS 0
189#define HV_STATUS_INVALID_HYPERCALL_CODE 2
190#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
191#define HV_STATUS_INVALID_ALIGNMENT 4
192#define HV_STATUS_INSUFFICIENT_BUFFERS 19
193
194#endif
diff --git a/arch/x86/include/uapi/asm/ioctl.h b/arch/x86/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/x86/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/x86/include/uapi/asm/ioctls.h b/arch/x86/include/uapi/asm/ioctls.h
deleted file mode 100644
index ec34c760665..00000000000
--- a/arch/x86/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/uapi/asm/ipcbuf.h b/arch/x86/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d..00000000000
--- a/arch/x86/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/uapi/asm/ist.h b/arch/x86/include/uapi/asm/ist.h
deleted file mode 100644
index bad9f5ea407..00000000000
--- a/arch/x86/include/uapi/asm/ist.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Include file for the interface to IST BIOS
3 * Copyright 2002 Andy Grover <andrew.grover@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2, or (at your option) any
8 * later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15#ifndef _UAPI_ASM_X86_IST_H
16#define _UAPI_ASM_X86_IST_H
17
18
19
20#include <linux/types.h>
21
22struct ist_info {
23 __u32 signature;
24 __u32 command;
25 __u32 event;
26 __u32 perf_level;
27};
28
29#endif /* _UAPI_ASM_X86_IST_H */
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
deleted file mode 100644
index a65ec29e6ff..00000000000
--- a/arch/x86/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,346 +0,0 @@
1#ifndef _ASM_X86_KVM_H
2#define _ASM_X86_KVM_H
3
4/*
5 * KVM x86 specific structures and definitions
6 *
7 */
8
9#include <linux/types.h>
10#include <linux/ioctl.h>
11
12#define DE_VECTOR 0
13#define DB_VECTOR 1
14#define BP_VECTOR 3
15#define OF_VECTOR 4
16#define BR_VECTOR 5
17#define UD_VECTOR 6
18#define NM_VECTOR 7
19#define DF_VECTOR 8
20#define TS_VECTOR 10
21#define NP_VECTOR 11
22#define SS_VECTOR 12
23#define GP_VECTOR 13
24#define PF_VECTOR 14
25#define MF_VECTOR 16
26#define MC_VECTOR 18
27
28/* Select x86 specific features in <linux/kvm.h> */
29#define __KVM_HAVE_PIT
30#define __KVM_HAVE_IOAPIC
31#define __KVM_HAVE_IRQ_LINE
32#define __KVM_HAVE_DEVICE_ASSIGNMENT
33#define __KVM_HAVE_MSI
34#define __KVM_HAVE_USER_NMI
35#define __KVM_HAVE_GUEST_DEBUG
36#define __KVM_HAVE_MSIX
37#define __KVM_HAVE_MCE
38#define __KVM_HAVE_PIT_STATE2
39#define __KVM_HAVE_XEN_HVM
40#define __KVM_HAVE_VCPU_EVENTS
41#define __KVM_HAVE_DEBUGREGS
42#define __KVM_HAVE_XSAVE
43#define __KVM_HAVE_XCRS
44#define __KVM_HAVE_READONLY_MEM
45
46/* Architectural interrupt line count. */
47#define KVM_NR_INTERRUPTS 256
48
49struct kvm_memory_alias {
50 __u32 slot; /* this has a different namespace than memory slots */
51 __u32 flags;
52 __u64 guest_phys_addr;
53 __u64 memory_size;
54 __u64 target_phys_addr;
55};
56
57/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
58struct kvm_pic_state {
59 __u8 last_irr; /* edge detection */
60 __u8 irr; /* interrupt request register */
61 __u8 imr; /* interrupt mask register */
62 __u8 isr; /* interrupt service register */
63 __u8 priority_add; /* highest irq priority */
64 __u8 irq_base;
65 __u8 read_reg_select;
66 __u8 poll;
67 __u8 special_mask;
68 __u8 init_state;
69 __u8 auto_eoi;
70 __u8 rotate_on_auto_eoi;
71 __u8 special_fully_nested_mode;
72 __u8 init4; /* true if 4 byte init */
73 __u8 elcr; /* PIIX edge/trigger selection */
74 __u8 elcr_mask;
75};
76
77#define KVM_IOAPIC_NUM_PINS 24
78struct kvm_ioapic_state {
79 __u64 base_address;
80 __u32 ioregsel;
81 __u32 id;
82 __u32 irr;
83 __u32 pad;
84 union {
85 __u64 bits;
86 struct {
87 __u8 vector;
88 __u8 delivery_mode:3;
89 __u8 dest_mode:1;
90 __u8 delivery_status:1;
91 __u8 polarity:1;
92 __u8 remote_irr:1;
93 __u8 trig_mode:1;
94 __u8 mask:1;
95 __u8 reserve:7;
96 __u8 reserved[4];
97 __u8 dest_id;
98 } fields;
99 } redirtbl[KVM_IOAPIC_NUM_PINS];
100};
101
102#define KVM_IRQCHIP_PIC_MASTER 0
103#define KVM_IRQCHIP_PIC_SLAVE 1
104#define KVM_IRQCHIP_IOAPIC 2
105#define KVM_NR_IRQCHIPS 3
106
107/* for KVM_GET_REGS and KVM_SET_REGS */
108struct kvm_regs {
109 /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
110 __u64 rax, rbx, rcx, rdx;
111 __u64 rsi, rdi, rsp, rbp;
112 __u64 r8, r9, r10, r11;
113 __u64 r12, r13, r14, r15;
114 __u64 rip, rflags;
115};
116
117/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
118#define KVM_APIC_REG_SIZE 0x400
119struct kvm_lapic_state {
120 char regs[KVM_APIC_REG_SIZE];
121};
122
123struct kvm_segment {
124 __u64 base;
125 __u32 limit;
126 __u16 selector;
127 __u8 type;
128 __u8 present, dpl, db, s, l, g, avl;
129 __u8 unusable;
130 __u8 padding;
131};
132
133struct kvm_dtable {
134 __u64 base;
135 __u16 limit;
136 __u16 padding[3];
137};
138
139
140/* for KVM_GET_SREGS and KVM_SET_SREGS */
141struct kvm_sregs {
142 /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
143 struct kvm_segment cs, ds, es, fs, gs, ss;
144 struct kvm_segment tr, ldt;
145 struct kvm_dtable gdt, idt;
146 __u64 cr0, cr2, cr3, cr4, cr8;
147 __u64 efer;
148 __u64 apic_base;
149 __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
150};
151
152/* for KVM_GET_FPU and KVM_SET_FPU */
153struct kvm_fpu {
154 __u8 fpr[8][16];
155 __u16 fcw;
156 __u16 fsw;
157 __u8 ftwx; /* in fxsave format */
158 __u8 pad1;
159 __u16 last_opcode;
160 __u64 last_ip;
161 __u64 last_dp;
162 __u8 xmm[16][16];
163 __u32 mxcsr;
164 __u32 pad2;
165};
166
167struct kvm_msr_entry {
168 __u32 index;
169 __u32 reserved;
170 __u64 data;
171};
172
173/* for KVM_GET_MSRS and KVM_SET_MSRS */
174struct kvm_msrs {
175 __u32 nmsrs; /* number of msrs in entries */
176 __u32 pad;
177
178 struct kvm_msr_entry entries[0];
179};
180
181/* for KVM_GET_MSR_INDEX_LIST */
182struct kvm_msr_list {
183 __u32 nmsrs; /* number of msrs in entries */
184 __u32 indices[0];
185};
186
187
188struct kvm_cpuid_entry {
189 __u32 function;
190 __u32 eax;
191 __u32 ebx;
192 __u32 ecx;
193 __u32 edx;
194 __u32 padding;
195};
196
197/* for KVM_SET_CPUID */
198struct kvm_cpuid {
199 __u32 nent;
200 __u32 padding;
201 struct kvm_cpuid_entry entries[0];
202};
203
204struct kvm_cpuid_entry2 {
205 __u32 function;
206 __u32 index;
207 __u32 flags;
208 __u32 eax;
209 __u32 ebx;
210 __u32 ecx;
211 __u32 edx;
212 __u32 padding[3];
213};
214
215#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
216#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
217#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
218
219/* for KVM_SET_CPUID2 */
220struct kvm_cpuid2 {
221 __u32 nent;
222 __u32 padding;
223 struct kvm_cpuid_entry2 entries[0];
224};
225
226/* for KVM_GET_PIT and KVM_SET_PIT */
227struct kvm_pit_channel_state {
228 __u32 count; /* can be 65536 */
229 __u16 latched_count;
230 __u8 count_latched;
231 __u8 status_latched;
232 __u8 status;
233 __u8 read_state;
234 __u8 write_state;
235 __u8 write_latch;
236 __u8 rw_mode;
237 __u8 mode;
238 __u8 bcd;
239 __u8 gate;
240 __s64 count_load_time;
241};
242
243struct kvm_debug_exit_arch {
244 __u32 exception;
245 __u32 pad;
246 __u64 pc;
247 __u64 dr6;
248 __u64 dr7;
249};
250
251#define KVM_GUESTDBG_USE_SW_BP 0x00010000
252#define KVM_GUESTDBG_USE_HW_BP 0x00020000
253#define KVM_GUESTDBG_INJECT_DB 0x00040000
254#define KVM_GUESTDBG_INJECT_BP 0x00080000
255
256/* for KVM_SET_GUEST_DEBUG */
257struct kvm_guest_debug_arch {
258 __u64 debugreg[8];
259};
260
261struct kvm_pit_state {
262 struct kvm_pit_channel_state channels[3];
263};
264
265#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
266
267struct kvm_pit_state2 {
268 struct kvm_pit_channel_state channels[3];
269 __u32 flags;
270 __u32 reserved[9];
271};
272
273struct kvm_reinject_control {
274 __u8 pit_reinject;
275 __u8 reserved[31];
276};
277
278/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
279#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
280#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
281#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
282
283/* Interrupt shadow states */
284#define KVM_X86_SHADOW_INT_MOV_SS 0x01
285#define KVM_X86_SHADOW_INT_STI 0x02
286
287/* for KVM_GET/SET_VCPU_EVENTS */
288struct kvm_vcpu_events {
289 struct {
290 __u8 injected;
291 __u8 nr;
292 __u8 has_error_code;
293 __u8 pad;
294 __u32 error_code;
295 } exception;
296 struct {
297 __u8 injected;
298 __u8 nr;
299 __u8 soft;
300 __u8 shadow;
301 } interrupt;
302 struct {
303 __u8 injected;
304 __u8 pending;
305 __u8 masked;
306 __u8 pad;
307 } nmi;
308 __u32 sipi_vector;
309 __u32 flags;
310 __u32 reserved[10];
311};
312
313/* for KVM_GET/SET_DEBUGREGS */
314struct kvm_debugregs {
315 __u64 db[4];
316 __u64 dr6;
317 __u64 dr7;
318 __u64 flags;
319 __u64 reserved[9];
320};
321
322/* for KVM_CAP_XSAVE */
323struct kvm_xsave {
324 __u32 region[1024];
325};
326
327#define KVM_MAX_XCRS 16
328
329struct kvm_xcr {
330 __u32 xcr;
331 __u32 reserved;
332 __u64 value;
333};
334
335struct kvm_xcrs {
336 __u32 nr_xcrs;
337 __u32 flags;
338 struct kvm_xcr xcrs[KVM_MAX_XCRS];
339 __u64 padding[16];
340};
341
342/* definition of registers in kvm_run */
343struct kvm_sync_regs {
344};
345
346#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 06fdbd987e9..00000000000
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,100 +0,0 @@
1#ifndef _UAPI_ASM_X86_KVM_PARA_H
2#define _UAPI_ASM_X86_KVM_PARA_H
3
4#include <linux/types.h>
5#include <asm/hyperv.h>
6
7/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
8 * should be used to determine that a VM is running under KVM.
9 */
10#define KVM_CPUID_SIGNATURE 0x40000000
11
12/* This CPUID returns a feature bitmap in eax. Before enabling a particular
13 * paravirtualization, the appropriate feature bit should be checked.
14 */
15#define KVM_CPUID_FEATURES 0x40000001
16#define KVM_FEATURE_CLOCKSOURCE 0
17#define KVM_FEATURE_NOP_IO_DELAY 1
18#define KVM_FEATURE_MMU_OP 2
19/* This indicates that the new set of kvmclock msrs
20 * are available. The use of 0x11 and 0x12 is deprecated
21 */
22#define KVM_FEATURE_CLOCKSOURCE2 3
23#define KVM_FEATURE_ASYNC_PF 4
24#define KVM_FEATURE_STEAL_TIME 5
25#define KVM_FEATURE_PV_EOI 6
26
27/* The last 8 bits are used to indicate how to interpret the flags field
28 * in pvclock structure. If no bits are set, all flags are ignored.
29 */
30#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
31
32#define MSR_KVM_WALL_CLOCK 0x11
33#define MSR_KVM_SYSTEM_TIME 0x12
34
35#define KVM_MSR_ENABLED 1
36/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
37#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
38#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
39#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
40#define MSR_KVM_STEAL_TIME 0x4b564d03
41#define MSR_KVM_PV_EOI_EN 0x4b564d04
42
43struct kvm_steal_time {
44 __u64 steal;
45 __u32 version;
46 __u32 flags;
47 __u32 pad[12];
48};
49
50#define KVM_STEAL_ALIGNMENT_BITS 5
51#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
52#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
53
54#define KVM_MAX_MMU_OP_BATCH 32
55
56#define KVM_ASYNC_PF_ENABLED (1 << 0)
57#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
58
59/* Operations for KVM_HC_MMU_OP */
60#define KVM_MMU_OP_WRITE_PTE 1
61#define KVM_MMU_OP_FLUSH_TLB 2
62#define KVM_MMU_OP_RELEASE_PT 3
63
64/* Payload for KVM_HC_MMU_OP */
65struct kvm_mmu_op_header {
66 __u32 op;
67 __u32 pad;
68};
69
70struct kvm_mmu_op_write_pte {
71 struct kvm_mmu_op_header header;
72 __u64 pte_phys;
73 __u64 pte_val;
74};
75
76struct kvm_mmu_op_flush_tlb {
77 struct kvm_mmu_op_header header;
78};
79
80struct kvm_mmu_op_release_pt {
81 struct kvm_mmu_op_header header;
82 __u64 pt_phys;
83};
84
85#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
86#define KVM_PV_REASON_PAGE_READY 2
87
88struct kvm_vcpu_pv_apf_data {
89 __u32 reason;
90 __u8 pad[60];
91 __u32 enabled;
92};
93
94#define KVM_PV_EOI_BIT 0
95#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
96#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
97#define KVM_PV_EOI_DISABLED 0x0
98
99
100#endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
deleted file mode 100644
index 46727eb37bf..00000000000
--- a/arch/x86/include/uapi/asm/ldt.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * ldt.h
3 *
4 * Definitions of structures used with the modify_ldt system call.
5 */
6#ifndef _ASM_X86_LDT_H
7#define _ASM_X86_LDT_H
8
9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192
11/* The size of each LDT entry. */
12#define LDT_ENTRY_SIZE 8
13
14#ifndef __ASSEMBLY__
15/*
16 * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS
17 * not to the default values if you still want to do syscalls. This
18 * call is more for 32bit mode therefore.
19 */
20struct user_desc {
21 unsigned int entry_number;
22 unsigned int base_addr;
23 unsigned int limit;
24 unsigned int seg_32bit:1;
25 unsigned int contents:2;
26 unsigned int read_exec_only:1;
27 unsigned int limit_in_pages:1;
28 unsigned int seg_not_present:1;
29 unsigned int useable:1;
30#ifdef __x86_64__
31 unsigned int lm:1;
32#endif
33};
34
35#define MODIFY_LDT_CONTENTS_DATA 0
36#define MODIFY_LDT_CONTENTS_STACK 1
37#define MODIFY_LDT_CONTENTS_CODE 2
38
39#endif /* !__ASSEMBLY__ */
40#endif /* _ASM_X86_LDT_H */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
deleted file mode 100644
index 58c829871c3..00000000000
--- a/arch/x86/include/uapi/asm/mce.h
+++ /dev/null
@@ -1,121 +0,0 @@
1#ifndef _UAPI_ASM_X86_MCE_H
2#define _UAPI_ASM_X86_MCE_H
3
4#include <linux/types.h>
5#include <asm/ioctls.h>
6
7/*
8 * Machine Check support for x86
9 */
10
11/* MCG_CAP register defines */
12#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
13#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
15#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
16#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
17#define MCG_EXT_CNT_SHIFT 16
18#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
19#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
20
21/* MCG_STATUS register defines */
22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
25
26/* MCi_STATUS register defines */
27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
28#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
29#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
30#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
31#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
32#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
36#define MCACOD 0xffff /* MCA Error Code */
37
38/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
39#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
40#define MCACOD_SCRUBMSK 0xfff0
41#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
42#define MCACOD_DATA 0x0134 /* Data Load */
43#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
44
45/* MCi_MISC register defines */
46#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
47#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
48#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
49#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
50#define MCI_MISC_ADDR_PHYS 2 /* physical address */
51#define MCI_MISC_ADDR_MEM 3 /* memory address */
52#define MCI_MISC_ADDR_GENERIC 7 /* generic */
53
54/* CTL2 register defines */
55#define MCI_CTL2_CMCI_EN (1ULL << 30)
56#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
57
58#define MCJ_CTX_MASK 3
59#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
60#define MCJ_CTX_RANDOM 0 /* inject context: random */
61#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
62#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
63#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
64#define MCJ_EXCEPTION 0x8 /* raise as exception */
65#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
66
67/* Fields are zero when not available */
68struct mce {
69 __u64 status;
70 __u64 misc;
71 __u64 addr;
72 __u64 mcgstatus;
73 __u64 ip;
74 __u64 tsc; /* cpu time stamp counter */
75 __u64 time; /* wall time_t when error was detected */
76 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
77 __u8 inject_flags; /* software inject flags */
78 __u16 pad;
79 __u32 cpuid; /* CPUID 1 EAX */
80 __u8 cs; /* code segment */
81 __u8 bank; /* machine check bank */
82 __u8 cpu; /* cpu number; obsolete; use extcpu now */
83 __u8 finished; /* entry is valid */
84 __u32 extcpu; /* linux cpu number that detected the error */
85 __u32 socketid; /* CPU socket ID */
86 __u32 apicid; /* CPU initial apic ID */
87 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
88};
89
90/*
91 * This structure contains all data related to the MCE log. Also
92 * carries a signature to make it easier to find from external
93 * debugging tools. Each entry is only valid when its finished flag
94 * is set.
95 */
96
97#define MCE_LOG_LEN 32
98
99struct mce_log {
100 char signature[12]; /* "MACHINECHECK" */
101 unsigned len; /* = MCE_LOG_LEN */
102 unsigned next;
103 unsigned flags;
104 unsigned recordlen; /* length of struct mce */
105 struct mce entry[MCE_LOG_LEN];
106};
107
108#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
109
110#define MCE_LOG_SIGNATURE "MACHINECHECK"
111
112#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
113#define MCE_GET_LOG_LEN _IOR('M', 2, int)
114#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
115
116/* Software defined banks */
117#define MCE_EXTENDED_BANK 128
118#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
119#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
120
121#endif /* _UAPI_ASM_X86_MCE_H */
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
deleted file mode 100644
index 513b05f15bb..00000000000
--- a/arch/x86/include/uapi/asm/mman.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_MMAN_H
2#define _ASM_X86_MMAN_H
3
4#define MAP_32BIT 0x40 /* only give out 32bit addresses */
5
6#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
7#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
8
9#include <asm-generic/mman.h>
10
11#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
deleted file mode 100644
index 809134c644a..00000000000
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
deleted file mode 100644
index 433a59fb1a7..00000000000
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ /dev/null
@@ -1,525 +0,0 @@
1#ifndef _ASM_X86_MSR_INDEX_H
2#define _ASM_X86_MSR_INDEX_H
3
4/* CPU model specific register (MSR) numbers */
5
6/* x86-64 specific MSRs */
7#define MSR_EFER 0xc0000080 /* extended feature register */
8#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
9#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
10#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
11#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
12#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
13#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
14#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
15#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
16
17/* EFER bits: */
18#define _EFER_SCE 0 /* SYSCALL/SYSRET */
19#define _EFER_LME 8 /* Long mode enable */
20#define _EFER_LMA 10 /* Long mode active (read-only) */
21#define _EFER_NX 11 /* No execute enable */
22#define _EFER_SVME 12 /* Enable virtualization */
23#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
24#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
25
26#define EFER_SCE (1<<_EFER_SCE)
27#define EFER_LME (1<<_EFER_LME)
28#define EFER_LMA (1<<_EFER_LMA)
29#define EFER_NX (1<<_EFER_NX)
30#define EFER_SVME (1<<_EFER_SVME)
31#define EFER_LMSLE (1<<_EFER_LMSLE)
32#define EFER_FFXSR (1<<_EFER_FFXSR)
33
34/* Intel MSRs. Some also available on other CPUs */
35#define MSR_IA32_PERFCTR0 0x000000c1
36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd
38#define MSR_NHM_PLATFORM_INFO 0x000000ce
39
40#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
41#define NHM_C3_AUTO_DEMOTE (1UL << 25)
42#define NHM_C1_AUTO_DEMOTE (1UL << 26)
43#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46
47#define MSR_MTRRcap 0x000000fe
48#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e
50
51#define MSR_IA32_SYSENTER_CS 0x00000174
52#define MSR_IA32_SYSENTER_ESP 0x00000175
53#define MSR_IA32_SYSENTER_EIP 0x00000176
54
55#define MSR_IA32_MCG_CAP 0x00000179
56#define MSR_IA32_MCG_STATUS 0x0000017a
57#define MSR_IA32_MCG_CTL 0x0000017b
58
59#define MSR_OFFCORE_RSP_0 0x000001a6
60#define MSR_OFFCORE_RSP_1 0x000001a7
61#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
62#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
63
64#define MSR_LBR_SELECT 0x000001c8
65#define MSR_LBR_TOS 0x000001c9
66#define MSR_LBR_NHM_FROM 0x00000680
67#define MSR_LBR_NHM_TO 0x000006c0
68#define MSR_LBR_CORE_FROM 0x00000040
69#define MSR_LBR_CORE_TO 0x00000060
70
71#define MSR_IA32_PEBS_ENABLE 0x000003f1
72#define MSR_IA32_DS_AREA 0x00000600
73#define MSR_IA32_PERF_CAPABILITIES 0x00000345
74
75#define MSR_MTRRfix64K_00000 0x00000250
76#define MSR_MTRRfix16K_80000 0x00000258
77#define MSR_MTRRfix16K_A0000 0x00000259
78#define MSR_MTRRfix4K_C0000 0x00000268
79#define MSR_MTRRfix4K_C8000 0x00000269
80#define MSR_MTRRfix4K_D0000 0x0000026a
81#define MSR_MTRRfix4K_D8000 0x0000026b
82#define MSR_MTRRfix4K_E0000 0x0000026c
83#define MSR_MTRRfix4K_E8000 0x0000026d
84#define MSR_MTRRfix4K_F0000 0x0000026e
85#define MSR_MTRRfix4K_F8000 0x0000026f
86#define MSR_MTRRdefType 0x000002ff
87
88#define MSR_IA32_CR_PAT 0x00000277
89
90#define MSR_IA32_DEBUGCTLMSR 0x000001d9
91#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
92#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
93#define MSR_IA32_LASTINTFROMIP 0x000001dd
94#define MSR_IA32_LASTINTTOIP 0x000001de
95
96/* DEBUGCTLMSR bits (others vary by model): */
97#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
98#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
99#define DEBUGCTLMSR_TR (1UL << 6)
100#define DEBUGCTLMSR_BTS (1UL << 7)
101#define DEBUGCTLMSR_BTINT (1UL << 8)
102#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
103#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
104#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
105
106#define MSR_IA32_MC0_CTL 0x00000400
107#define MSR_IA32_MC0_STATUS 0x00000401
108#define MSR_IA32_MC0_ADDR 0x00000402
109#define MSR_IA32_MC0_MISC 0x00000403
110
111/* C-state Residency Counters */
112#define MSR_PKG_C3_RESIDENCY 0x000003f8
113#define MSR_PKG_C6_RESIDENCY 0x000003f9
114#define MSR_PKG_C7_RESIDENCY 0x000003fa
115#define MSR_CORE_C3_RESIDENCY 0x000003fc
116#define MSR_CORE_C6_RESIDENCY 0x000003fd
117#define MSR_CORE_C7_RESIDENCY 0x000003fe
118#define MSR_PKG_C2_RESIDENCY 0x0000060d
119
120/* Run Time Average Power Limiting (RAPL) Interface */
121
122#define MSR_RAPL_POWER_UNIT 0x00000606
123
124#define MSR_PKG_POWER_LIMIT 0x00000610
125#define MSR_PKG_ENERGY_STATUS 0x00000611
126#define MSR_PKG_PERF_STATUS 0x00000613
127#define MSR_PKG_POWER_INFO 0x00000614
128
129#define MSR_DRAM_POWER_LIMIT 0x00000618
130#define MSR_DRAM_ENERGY_STATUS 0x00000619
131#define MSR_DRAM_PERF_STATUS 0x0000061b
132#define MSR_DRAM_POWER_INFO 0x0000061c
133
134#define MSR_PP0_POWER_LIMIT 0x00000638
135#define MSR_PP0_ENERGY_STATUS 0x00000639
136#define MSR_PP0_POLICY 0x0000063a
137#define MSR_PP0_PERF_STATUS 0x0000063b
138
139#define MSR_PP1_POWER_LIMIT 0x00000640
140#define MSR_PP1_ENERGY_STATUS 0x00000641
141#define MSR_PP1_POLICY 0x00000642
142
143#define MSR_AMD64_MC0_MASK 0xc0010044
144
145#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
146#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
147#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
148#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
149
150#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
151
152/* These are consecutive and not in the normal 4er MCE bank block */
153#define MSR_IA32_MC0_CTL2 0x00000280
154#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
155
156#define MSR_P6_PERFCTR0 0x000000c1
157#define MSR_P6_PERFCTR1 0x000000c2
158#define MSR_P6_EVNTSEL0 0x00000186
159#define MSR_P6_EVNTSEL1 0x00000187
160
161#define MSR_KNC_PERFCTR0 0x00000020
162#define MSR_KNC_PERFCTR1 0x00000021
163#define MSR_KNC_EVNTSEL0 0x00000028
164#define MSR_KNC_EVNTSEL1 0x00000029
165
166/* AMD64 MSRs. Not complete. See the architecture manual for a more
167 complete list. */
168
169#define MSR_AMD64_PATCH_LEVEL 0x0000008b
170#define MSR_AMD64_TSC_RATIO 0xc0000104
171#define MSR_AMD64_NB_CFG 0xc001001f
172#define MSR_AMD64_PATCH_LOADER 0xc0010020
173#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
174#define MSR_AMD64_OSVW_STATUS 0xc0010141
175#define MSR_AMD64_DC_CFG 0xc0011022
176#define MSR_AMD64_IBSFETCHCTL 0xc0011030
177#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
178#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
179#define MSR_AMD64_IBSFETCH_REG_COUNT 3
180#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
181#define MSR_AMD64_IBSOPCTL 0xc0011033
182#define MSR_AMD64_IBSOPRIP 0xc0011034
183#define MSR_AMD64_IBSOPDATA 0xc0011035
184#define MSR_AMD64_IBSOPDATA2 0xc0011036
185#define MSR_AMD64_IBSOPDATA3 0xc0011037
186#define MSR_AMD64_IBSDCLINAD 0xc0011038
187#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
188#define MSR_AMD64_IBSOP_REG_COUNT 7
189#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
190#define MSR_AMD64_IBSCTL 0xc001103a
191#define MSR_AMD64_IBSBRTARGET 0xc001103b
192#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
193
194/* Fam 15h MSRs */
195#define MSR_F15H_PERF_CTL 0xc0010200
196#define MSR_F15H_PERF_CTR 0xc0010201
197
198/* Fam 10h MSRs */
199#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
200#define FAM10H_MMIO_CONF_ENABLE (1<<0)
201#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
202#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
203#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
204#define FAM10H_MMIO_CONF_BASE_SHIFT 20
205#define MSR_FAM10H_NODE_ID 0xc001100c
206
207/* K8 MSRs */
208#define MSR_K8_TOP_MEM1 0xc001001a
209#define MSR_K8_TOP_MEM2 0xc001001d
210#define MSR_K8_SYSCFG 0xc0010010
211#define MSR_K8_INT_PENDING_MSG 0xc0010055
212/* C1E active bits in int pending message */
213#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
214#define MSR_K8_TSEG_ADDR 0xc0010112
215#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
216#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
217#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
218
219/* K7 MSRs */
220#define MSR_K7_EVNTSEL0 0xc0010000
221#define MSR_K7_PERFCTR0 0xc0010004
222#define MSR_K7_EVNTSEL1 0xc0010001
223#define MSR_K7_PERFCTR1 0xc0010005
224#define MSR_K7_EVNTSEL2 0xc0010002
225#define MSR_K7_PERFCTR2 0xc0010006
226#define MSR_K7_EVNTSEL3 0xc0010003
227#define MSR_K7_PERFCTR3 0xc0010007
228#define MSR_K7_CLK_CTL 0xc001001b
229#define MSR_K7_HWCR 0xc0010015
230#define MSR_K7_FID_VID_CTL 0xc0010041
231#define MSR_K7_FID_VID_STATUS 0xc0010042
232
233/* K6 MSRs */
234#define MSR_K6_WHCR 0xc0000082
235#define MSR_K6_UWCCR 0xc0000085
236#define MSR_K6_EPMR 0xc0000086
237#define MSR_K6_PSOR 0xc0000087
238#define MSR_K6_PFIR 0xc0000088
239
240/* Centaur-Hauls/IDT defined MSRs. */
241#define MSR_IDT_FCR1 0x00000107
242#define MSR_IDT_FCR2 0x00000108
243#define MSR_IDT_FCR3 0x00000109
244#define MSR_IDT_FCR4 0x0000010a
245
246#define MSR_IDT_MCR0 0x00000110
247#define MSR_IDT_MCR1 0x00000111
248#define MSR_IDT_MCR2 0x00000112
249#define MSR_IDT_MCR3 0x00000113
250#define MSR_IDT_MCR4 0x00000114
251#define MSR_IDT_MCR5 0x00000115
252#define MSR_IDT_MCR6 0x00000116
253#define MSR_IDT_MCR7 0x00000117
254#define MSR_IDT_MCR_CTRL 0x00000120
255
256/* VIA Cyrix defined MSRs*/
257#define MSR_VIA_FCR 0x00001107
258#define MSR_VIA_LONGHAUL 0x0000110a
259#define MSR_VIA_RNG 0x0000110b
260#define MSR_VIA_BCR2 0x00001147
261
262/* Transmeta defined MSRs */
263#define MSR_TMTA_LONGRUN_CTRL 0x80868010
264#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
265#define MSR_TMTA_LRTI_READOUT 0x80868018
266#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
267
268/* Intel defined MSRs. */
269#define MSR_IA32_P5_MC_ADDR 0x00000000
270#define MSR_IA32_P5_MC_TYPE 0x00000001
271#define MSR_IA32_TSC 0x00000010
272#define MSR_IA32_PLATFORM_ID 0x00000017
273#define MSR_IA32_EBL_CR_POWERON 0x0000002a
274#define MSR_EBC_FREQUENCY_ID 0x0000002c
275#define MSR_IA32_FEATURE_CONTROL 0x0000003a
276#define MSR_IA32_TSC_ADJUST 0x0000003b
277
278#define FEATURE_CONTROL_LOCKED (1<<0)
279#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
280#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
281
282#define MSR_IA32_APICBASE 0x0000001b
283#define MSR_IA32_APICBASE_BSP (1<<8)
284#define MSR_IA32_APICBASE_ENABLE (1<<11)
285#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
286
287#define MSR_IA32_TSCDEADLINE 0x000006e0
288
289#define MSR_IA32_UCODE_WRITE 0x00000079
290#define MSR_IA32_UCODE_REV 0x0000008b
291
292#define MSR_IA32_PERF_STATUS 0x00000198
293#define MSR_IA32_PERF_CTL 0x00000199
294#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
295#define MSR_AMD_PERF_STATUS 0xc0010063
296#define MSR_AMD_PERF_CTL 0xc0010062
297
298#define MSR_IA32_MPERF 0x000000e7
299#define MSR_IA32_APERF 0x000000e8
300
301#define MSR_IA32_THERM_CONTROL 0x0000019a
302#define MSR_IA32_THERM_INTERRUPT 0x0000019b
303
304#define THERM_INT_HIGH_ENABLE (1 << 0)
305#define THERM_INT_LOW_ENABLE (1 << 1)
306#define THERM_INT_PLN_ENABLE (1 << 24)
307
308#define MSR_IA32_THERM_STATUS 0x0000019c
309
310#define THERM_STATUS_PROCHOT (1 << 0)
311#define THERM_STATUS_POWER_LIMIT (1 << 10)
312
313#define MSR_THERM2_CTL 0x0000019d
314
315#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
316
317#define MSR_IA32_MISC_ENABLE 0x000001a0
318
319#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
320
321#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
322#define ENERGY_PERF_BIAS_PERFORMANCE 0
323#define ENERGY_PERF_BIAS_NORMAL 6
324#define ENERGY_PERF_BIAS_POWERSAVE 15
325
326#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
327
328#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
329#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
330
331#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
332
333#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
334#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
335#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
336
337/* Thermal Thresholds Support */
338#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
339#define THERM_SHIFT_THRESHOLD0 8
340#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0)
341#define THERM_INT_THRESHOLD1_ENABLE (1 << 23)
342#define THERM_SHIFT_THRESHOLD1 16
343#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1)
344#define THERM_STATUS_THRESHOLD0 (1 << 6)
345#define THERM_LOG_THRESHOLD0 (1 << 7)
346#define THERM_STATUS_THRESHOLD1 (1 << 8)
347#define THERM_LOG_THRESHOLD1 (1 << 9)
348
349/* MISC_ENABLE bits: architectural */
350#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
351#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
352#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7)
353#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11)
354#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12)
355#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
356#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
357#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22)
358#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23)
359#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34)
360
361/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
362#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2)
363#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3)
364#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
365#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6)
366#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8)
367#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
368#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10)
369#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10)
370#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13)
371#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
372#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
373#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24)
374#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
375#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
376#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
377
378#define MSR_IA32_TSC_DEADLINE 0x000006E0
379
380/* P4/Xeon+ specific */
381#define MSR_IA32_MCG_EAX 0x00000180
382#define MSR_IA32_MCG_EBX 0x00000181
383#define MSR_IA32_MCG_ECX 0x00000182
384#define MSR_IA32_MCG_EDX 0x00000183
385#define MSR_IA32_MCG_ESI 0x00000184
386#define MSR_IA32_MCG_EDI 0x00000185
387#define MSR_IA32_MCG_EBP 0x00000186
388#define MSR_IA32_MCG_ESP 0x00000187
389#define MSR_IA32_MCG_EFLAGS 0x00000188
390#define MSR_IA32_MCG_EIP 0x00000189
391#define MSR_IA32_MCG_RESERVED 0x0000018a
392
393/* Pentium IV performance counter MSRs */
394#define MSR_P4_BPU_PERFCTR0 0x00000300
395#define MSR_P4_BPU_PERFCTR1 0x00000301
396#define MSR_P4_BPU_PERFCTR2 0x00000302
397#define MSR_P4_BPU_PERFCTR3 0x00000303
398#define MSR_P4_MS_PERFCTR0 0x00000304
399#define MSR_P4_MS_PERFCTR1 0x00000305
400#define MSR_P4_MS_PERFCTR2 0x00000306
401#define MSR_P4_MS_PERFCTR3 0x00000307
402#define MSR_P4_FLAME_PERFCTR0 0x00000308
403#define MSR_P4_FLAME_PERFCTR1 0x00000309
404#define MSR_P4_FLAME_PERFCTR2 0x0000030a
405#define MSR_P4_FLAME_PERFCTR3 0x0000030b
406#define MSR_P4_IQ_PERFCTR0 0x0000030c
407#define MSR_P4_IQ_PERFCTR1 0x0000030d
408#define MSR_P4_IQ_PERFCTR2 0x0000030e
409#define MSR_P4_IQ_PERFCTR3 0x0000030f
410#define MSR_P4_IQ_PERFCTR4 0x00000310
411#define MSR_P4_IQ_PERFCTR5 0x00000311
412#define MSR_P4_BPU_CCCR0 0x00000360
413#define MSR_P4_BPU_CCCR1 0x00000361
414#define MSR_P4_BPU_CCCR2 0x00000362
415#define MSR_P4_BPU_CCCR3 0x00000363
416#define MSR_P4_MS_CCCR0 0x00000364
417#define MSR_P4_MS_CCCR1 0x00000365
418#define MSR_P4_MS_CCCR2 0x00000366
419#define MSR_P4_MS_CCCR3 0x00000367
420#define MSR_P4_FLAME_CCCR0 0x00000368
421#define MSR_P4_FLAME_CCCR1 0x00000369
422#define MSR_P4_FLAME_CCCR2 0x0000036a
423#define MSR_P4_FLAME_CCCR3 0x0000036b
424#define MSR_P4_IQ_CCCR0 0x0000036c
425#define MSR_P4_IQ_CCCR1 0x0000036d
426#define MSR_P4_IQ_CCCR2 0x0000036e
427#define MSR_P4_IQ_CCCR3 0x0000036f
428#define MSR_P4_IQ_CCCR4 0x00000370
429#define MSR_P4_IQ_CCCR5 0x00000371
430#define MSR_P4_ALF_ESCR0 0x000003ca
431#define MSR_P4_ALF_ESCR1 0x000003cb
432#define MSR_P4_BPU_ESCR0 0x000003b2
433#define MSR_P4_BPU_ESCR1 0x000003b3
434#define MSR_P4_BSU_ESCR0 0x000003a0
435#define MSR_P4_BSU_ESCR1 0x000003a1
436#define MSR_P4_CRU_ESCR0 0x000003b8
437#define MSR_P4_CRU_ESCR1 0x000003b9
438#define MSR_P4_CRU_ESCR2 0x000003cc
439#define MSR_P4_CRU_ESCR3 0x000003cd
440#define MSR_P4_CRU_ESCR4 0x000003e0
441#define MSR_P4_CRU_ESCR5 0x000003e1
442#define MSR_P4_DAC_ESCR0 0x000003a8
443#define MSR_P4_DAC_ESCR1 0x000003a9
444#define MSR_P4_FIRM_ESCR0 0x000003a4
445#define MSR_P4_FIRM_ESCR1 0x000003a5
446#define MSR_P4_FLAME_ESCR0 0x000003a6
447#define MSR_P4_FLAME_ESCR1 0x000003a7
448#define MSR_P4_FSB_ESCR0 0x000003a2
449#define MSR_P4_FSB_ESCR1 0x000003a3
450#define MSR_P4_IQ_ESCR0 0x000003ba
451#define MSR_P4_IQ_ESCR1 0x000003bb
452#define MSR_P4_IS_ESCR0 0x000003b4
453#define MSR_P4_IS_ESCR1 0x000003b5
454#define MSR_P4_ITLB_ESCR0 0x000003b6
455#define MSR_P4_ITLB_ESCR1 0x000003b7
456#define MSR_P4_IX_ESCR0 0x000003c8
457#define MSR_P4_IX_ESCR1 0x000003c9
458#define MSR_P4_MOB_ESCR0 0x000003aa
459#define MSR_P4_MOB_ESCR1 0x000003ab
460#define MSR_P4_MS_ESCR0 0x000003c0
461#define MSR_P4_MS_ESCR1 0x000003c1
462#define MSR_P4_PMH_ESCR0 0x000003ac
463#define MSR_P4_PMH_ESCR1 0x000003ad
464#define MSR_P4_RAT_ESCR0 0x000003bc
465#define MSR_P4_RAT_ESCR1 0x000003bd
466#define MSR_P4_SAAT_ESCR0 0x000003ae
467#define MSR_P4_SAAT_ESCR1 0x000003af
468#define MSR_P4_SSU_ESCR0 0x000003be
469#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
470
471#define MSR_P4_TBPU_ESCR0 0x000003c2
472#define MSR_P4_TBPU_ESCR1 0x000003c3
473#define MSR_P4_TC_ESCR0 0x000003c4
474#define MSR_P4_TC_ESCR1 0x000003c5
475#define MSR_P4_U2L_ESCR0 0x000003b0
476#define MSR_P4_U2L_ESCR1 0x000003b1
477
478#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
479
480/* Intel Core-based CPU performance counters */
481#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
482#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
483#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
484#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
485#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
486#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
487#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
488
489/* Geode defined MSRs */
490#define MSR_GEODE_BUSCONT_CONF0 0x00001900
491
492/* Intel VT MSRs */
493#define MSR_IA32_VMX_BASIC 0x00000480
494#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
495#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
496#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
497#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
498#define MSR_IA32_VMX_MISC 0x00000485
499#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
500#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
501#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
502#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
503#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
504#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
505#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
506#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
507#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
508#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
509#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
510
511/* VMX_BASIC bits and bitmasks */
512#define VMX_BASIC_VMCS_SIZE_SHIFT 32
513#define VMX_BASIC_64 0x0001000000000000LLU
514#define VMX_BASIC_MEM_TYPE_SHIFT 50
515#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
516#define VMX_BASIC_MEM_TYPE_WB 6LLU
517#define VMX_BASIC_INOUT 0x0040000000000000LLU
518
519/* AMD-V MSRs */
520
521#define MSR_VM_CR 0xc0010114
522#define MSR_VM_IGNNE 0xc0010115
523#define MSR_VM_HSAVE_PA 0xc0010117
524
525#endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/arch/x86/include/uapi/asm/msr.h b/arch/x86/include/uapi/asm/msr.h
deleted file mode 100644
index 155e51048fa..00000000000
--- a/arch/x86/include/uapi/asm/msr.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _UAPI_ASM_X86_MSR_H
2#define _UAPI_ASM_X86_MSR_H
3
4#include <asm/msr-index.h>
5
6#ifndef __ASSEMBLY__
7
8#include <linux/types.h>
9#include <linux/ioctl.h>
10
11#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
12#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
13
14#endif /* __ASSEMBLY__ */
15#endif /* _UAPI_ASM_X86_MSR_H */
diff --git a/arch/x86/include/uapi/asm/mtrr.h b/arch/x86/include/uapi/asm/mtrr.h
deleted file mode 100644
index d0acb658c8f..00000000000
--- a/arch/x86/include/uapi/asm/mtrr.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/* Generic MTRR (Memory Type Range Register) ioctls.
2
3 Copyright (C) 1997-1999 Richard Gooch
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
22*/
23#ifndef _UAPI_ASM_X86_MTRR_H
24#define _UAPI_ASM_X86_MTRR_H
25
26#include <linux/types.h>
27#include <linux/ioctl.h>
28#include <linux/errno.h>
29
30#define MTRR_IOCTL_BASE 'M'
31
32/* Warning: this structure has a different order from i386
33 on x86-64. The 32bit emulation code takes care of that.
34 But you need to use this for 64bit, otherwise your X server
35 will break. */
36
37#ifdef __i386__
38struct mtrr_sentry {
39 unsigned long base; /* Base address */
40 unsigned int size; /* Size of region */
41 unsigned int type; /* Type of region */
42};
43
44struct mtrr_gentry {
45 unsigned int regnum; /* Register number */
46 unsigned long base; /* Base address */
47 unsigned int size; /* Size of region */
48 unsigned int type; /* Type of region */
49};
50
51#else /* __i386__ */
52
53struct mtrr_sentry {
54 __u64 base; /* Base address */
55 __u32 size; /* Size of region */
56 __u32 type; /* Type of region */
57};
58
59struct mtrr_gentry {
60 __u64 base; /* Base address */
61 __u32 size; /* Size of region */
62 __u32 regnum; /* Register number */
63 __u32 type; /* Type of region */
64 __u32 _pad; /* Unused */
65};
66
67#endif /* !__i386__ */
68
69struct mtrr_var_range {
70 __u32 base_lo;
71 __u32 base_hi;
72 __u32 mask_lo;
73 __u32 mask_hi;
74};
75
76/* In the Intel processor's MTRR interface, the MTRR type is always held in
77 an 8 bit field: */
78typedef __u8 mtrr_type;
79
80#define MTRR_NUM_FIXED_RANGES 88
81#define MTRR_MAX_VAR_RANGES 256
82
83struct mtrr_state_type {
84 struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
85 mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
86 unsigned char enabled;
87 unsigned char have_fixed;
88 mtrr_type def_type;
89};
90
91#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
92#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
93
94/* These are the various ioctls */
95#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
96#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
97#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
98#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
99#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
100#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
101#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
102#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
103#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
104#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
105
106/* These are the region types */
107#define MTRR_TYPE_UNCACHABLE 0
108#define MTRR_TYPE_WRCOMB 1
109/*#define MTRR_TYPE_ 2*/
110/*#define MTRR_TYPE_ 3*/
111#define MTRR_TYPE_WRTHROUGH 4
112#define MTRR_TYPE_WRPROT 5
113#define MTRR_TYPE_WRBACK 6
114#define MTRR_NUM_TYPES 7
115
116
117#endif /* _UAPI_ASM_X86_MTRR_H */
diff --git a/arch/x86/include/uapi/asm/param.h b/arch/x86/include/uapi/asm/param.h
deleted file mode 100644
index 965d4542797..00000000000
--- a/arch/x86/include/uapi/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/param.h>
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
deleted file mode 100644
index 3f2207bfd17..00000000000
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef _ASM_X86_PERF_REGS_H
2#define _ASM_X86_PERF_REGS_H
3
4enum perf_event_x86_regs {
5 PERF_REG_X86_AX,
6 PERF_REG_X86_BX,
7 PERF_REG_X86_CX,
8 PERF_REG_X86_DX,
9 PERF_REG_X86_SI,
10 PERF_REG_X86_DI,
11 PERF_REG_X86_BP,
12 PERF_REG_X86_SP,
13 PERF_REG_X86_IP,
14 PERF_REG_X86_FLAGS,
15 PERF_REG_X86_CS,
16 PERF_REG_X86_SS,
17 PERF_REG_X86_DS,
18 PERF_REG_X86_ES,
19 PERF_REG_X86_FS,
20 PERF_REG_X86_GS,
21 PERF_REG_X86_R8,
22 PERF_REG_X86_R9,
23 PERF_REG_X86_R10,
24 PERF_REG_X86_R11,
25 PERF_REG_X86_R12,
26 PERF_REG_X86_R13,
27 PERF_REG_X86_R14,
28 PERF_REG_X86_R15,
29
30 PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
31 PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
32};
33#endif /* _ASM_X86_PERF_REGS_H */
diff --git a/arch/x86/include/uapi/asm/poll.h b/arch/x86/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/x86/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/arch/x86/include/uapi/asm/posix_types.h b/arch/x86/include/uapi/asm/posix_types.h
deleted file mode 100644
index 85506b38362..00000000000
--- a/arch/x86/include/uapi/asm/posix_types.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __KERNEL__
2# ifdef __i386__
3# include <asm/posix_types_32.h>
4# elif defined(__ILP32__)
5# include <asm/posix_types_x32.h>
6# else
7# include <asm/posix_types_64.h>
8# endif
9#endif
diff --git a/arch/x86/include/uapi/asm/posix_types_32.h b/arch/x86/include/uapi/asm/posix_types_32.h
deleted file mode 100644
index 8e525059e7d..00000000000
--- a/arch/x86/include/uapi/asm/posix_types_32.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _ASM_X86_POSIX_TYPES_32_H
2#define _ASM_X86_POSIX_TYPES_32_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
13typedef unsigned short __kernel_ipc_pid_t;
14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
15
16typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t;
18#define __kernel_uid_t __kernel_uid_t
19
20typedef unsigned short __kernel_old_dev_t;
21#define __kernel_old_dev_t __kernel_old_dev_t
22
23#include <asm-generic/posix_types.h>
24
25#endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/uapi/asm/posix_types_64.h b/arch/x86/include/uapi/asm/posix_types_64.h
deleted file mode 100644
index cba0c1ead16..00000000000
--- a/arch/x86/include/uapi/asm/posix_types_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_X86_POSIX_TYPES_64_H
2#define _ASM_X86_POSIX_TYPES_64_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned short __kernel_old_uid_t;
11typedef unsigned short __kernel_old_gid_t;
12#define __kernel_old_uid_t __kernel_old_uid_t
13
14typedef unsigned long __kernel_old_dev_t;
15#define __kernel_old_dev_t __kernel_old_dev_t
16
17#include <asm-generic/posix_types.h>
18
19#endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/uapi/asm/posix_types_x32.h b/arch/x86/include/uapi/asm/posix_types_x32.h
deleted file mode 100644
index 85f9bdafa93..00000000000
--- a/arch/x86/include/uapi/asm/posix_types_x32.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _ASM_X86_POSIX_TYPES_X32_H
2#define _ASM_X86_POSIX_TYPES_X32_H
3
4/*
5 * This file is only used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 *
9 * These types should generally match the ones used by the 64-bit kernel,
10 *
11 */
12
13typedef long long __kernel_long_t;
14typedef unsigned long long __kernel_ulong_t;
15#define __kernel_long_t __kernel_long_t
16
17#include <asm/posix_types_64.h>
18
19#endif /* _ASM_X86_POSIX_TYPES_X32_H */
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
deleted file mode 100644
index 3ac5032fae0..00000000000
--- a/arch/x86/include/uapi/asm/prctl.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef _ASM_X86_PRCTL_H
2#define _ASM_X86_PRCTL_H
3
4#define ARCH_SET_GS 0x1001
5#define ARCH_SET_FS 0x1002
6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004
8
9#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
deleted file mode 100644
index 54991a74604..00000000000
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ /dev/null
@@ -1,99 +0,0 @@
1#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H
2#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
3/* Various flags defined: can be included from assembler. */
4
5/*
6 * EFLAGS bits
7 */
8#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
9#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
10#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
11#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
12#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
13#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
14#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
15#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
16#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
17#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
18#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
19#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
20#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
21#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
22#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
23#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
24#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
25#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
26
27/*
28 * Basic CPU control in CR0
29 */
30#define X86_CR0_PE 0x00000001 /* Protection Enable */
31#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
32#define X86_CR0_EM 0x00000004 /* Emulation */
33#define X86_CR0_TS 0x00000008 /* Task Switched */
34#define X86_CR0_ET 0x00000010 /* Extension Type */
35#define X86_CR0_NE 0x00000020 /* Numeric Error */
36#define X86_CR0_WP 0x00010000 /* Write Protect */
37#define X86_CR0_AM 0x00040000 /* Alignment Mask */
38#define X86_CR0_NW 0x20000000 /* Not Write-through */
39#define X86_CR0_CD 0x40000000 /* Cache Disable */
40#define X86_CR0_PG 0x80000000 /* Paging */
41
42/*
43 * Paging options in CR3
44 */
45#define X86_CR3_PWT 0x00000008 /* Page Write Through */
46#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
47#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
48
49/*
50 * Intel CPU features in CR4
51 */
52#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
53#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
54#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
55#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
56#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
57#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
58#define X86_CR4_MCE 0x00000040 /* Machine check enable */
59#define X86_CR4_PGE 0x00000080 /* enable global pages */
60#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
61#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
62#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
63#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
64#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
65#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
66#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
67#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
68#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
69
70/*
71 * x86-64 Task Priority Register, CR8
72 */
73#define X86_CR8_TPR 0x0000000F /* task priority register */
74
75/*
76 * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
77 */
78
79/*
80 * NSC/Cyrix CPU configuration register indexes
81 */
82#define CX86_PCR0 0x20
83#define CX86_GCR 0xb8
84#define CX86_CCR0 0xc0
85#define CX86_CCR1 0xc1
86#define CX86_CCR2 0xc2
87#define CX86_CCR3 0xc3
88#define CX86_CCR4 0xe8
89#define CX86_CCR5 0xe9
90#define CX86_CCR6 0xea
91#define CX86_CCR7 0xeb
92#define CX86_PCR1 0xf0
93#define CX86_DIR0 0xfe
94#define CX86_DIR1 0xff
95#define CX86_ARR_BASE 0xc4
96#define CX86_RCR_BASE 0xdc
97
98
99#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
deleted file mode 100644
index 7b0a55a8885..00000000000
--- a/arch/x86/include/uapi/asm/ptrace-abi.h
+++ /dev/null
@@ -1,87 +0,0 @@
1#ifndef _ASM_X86_PTRACE_ABI_H
2#define _ASM_X86_PTRACE_ABI_H
3
4#ifdef __i386__
5
6#define EBX 0
7#define ECX 1
8#define EDX 2
9#define ESI 3
10#define EDI 4
11#define EBP 5
12#define EAX 6
13#define DS 7
14#define ES 8
15#define FS 9
16#define GS 10
17#define ORIG_EAX 11
18#define EIP 12
19#define CS 13
20#define EFL 14
21#define UESP 15
22#define SS 16
23#define FRAME_SIZE 17
24
25#else /* __i386__ */
26
27#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
28#define R15 0
29#define R14 8
30#define R13 16
31#define R12 24
32#define RBP 32
33#define RBX 40
34/* arguments: interrupts/non tracing syscalls only save up to here*/
35#define R11 48
36#define R10 56
37#define R9 64
38#define R8 72
39#define RAX 80
40#define RCX 88
41#define RDX 96
42#define RSI 104
43#define RDI 112
44#define ORIG_RAX 120 /* = ERROR */
45/* end of arguments */
46/* cpu exception frame or undefined in case of fast syscall. */
47#define RIP 128
48#define CS 136
49#define EFLAGS 144
50#define RSP 152
51#define SS 160
52#define ARGOFFSET R11
53#endif /* __ASSEMBLY__ */
54
55/* top of stack page */
56#define FRAME_SIZE 168
57
58#endif /* !__i386__ */
59
60/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
61#define PTRACE_GETREGS 12
62#define PTRACE_SETREGS 13
63#define PTRACE_GETFPREGS 14
64#define PTRACE_SETFPREGS 15
65#define PTRACE_GETFPXREGS 18
66#define PTRACE_SETFPXREGS 19
67
68#define PTRACE_OLDSETOPTIONS 21
69
70/* only useful for access 32bit programs / kernels */
71#define PTRACE_GET_THREAD_AREA 25
72#define PTRACE_SET_THREAD_AREA 26
73
74#ifdef __x86_64__
75# define PTRACE_ARCH_PRCTL 30
76#endif
77
78#define PTRACE_SYSEMU 31
79#define PTRACE_SYSEMU_SINGLESTEP 32
80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82
83#ifndef __ASSEMBLY__
84#include <linux/types.h>
85#endif
86
87#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h
deleted file mode 100644
index ac4b9aa4d99..00000000000
--- a/arch/x86/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef _UAPI_ASM_X86_PTRACE_H
2#define _UAPI_ASM_X86_PTRACE_H
3
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6#include <asm/processor-flags.h>
7
8
9#ifndef __ASSEMBLY__
10
11#ifdef __i386__
12/* this struct defines the way the registers are stored on the
13 stack during a system call. */
14
15#ifndef __KERNEL__
16
17struct pt_regs {
18 long ebx;
19 long ecx;
20 long edx;
21 long esi;
22 long edi;
23 long ebp;
24 long eax;
25 int xds;
26 int xes;
27 int xfs;
28 int xgs;
29 long orig_eax;
30 long eip;
31 int xcs;
32 long eflags;
33 long esp;
34 int xss;
35};
36
37#endif /* __KERNEL__ */
38
39#else /* __i386__ */
40
41#ifndef __KERNEL__
42
43struct pt_regs {
44 unsigned long r15;
45 unsigned long r14;
46 unsigned long r13;
47 unsigned long r12;
48 unsigned long rbp;
49 unsigned long rbx;
50/* arguments: non interrupts/non tracing syscalls only save up to here*/
51 unsigned long r11;
52 unsigned long r10;
53 unsigned long r9;
54 unsigned long r8;
55 unsigned long rax;
56 unsigned long rcx;
57 unsigned long rdx;
58 unsigned long rsi;
59 unsigned long rdi;
60 unsigned long orig_rax;
61/* end of arguments */
62/* cpu exception frame or undefined */
63 unsigned long rip;
64 unsigned long cs;
65 unsigned long eflags;
66 unsigned long rsp;
67 unsigned long ss;
68/* top of stack page */
69};
70
71#endif /* __KERNEL__ */
72#endif /* !__i386__ */
73
74
75
76#endif /* !__ASSEMBLY__ */
77
78#endif /* _UAPI_ASM_X86_PTRACE_H */
diff --git a/arch/x86/include/uapi/asm/resource.h b/arch/x86/include/uapi/asm/resource.h
deleted file mode 100644
index 04bc4db8921..00000000000
--- a/arch/x86/include/uapi/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/resource.h>
diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
deleted file mode 100644
index ee50c801f7b..00000000000
--- a/arch/x86/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_X86_SEMBUF_H
2#define _ASM_X86_SEMBUF_H
3
4/*
5 * The semid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 */
13struct semid64_ds {
14 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
15 __kernel_time_t sem_otime; /* last semop time */
16 unsigned long __unused1;
17 __kernel_time_t sem_ctime; /* last change time */
18 unsigned long __unused2;
19 unsigned long sem_nsems; /* no. of semaphores in array */
20 unsigned long __unused3;
21 unsigned long __unused4;
22};
23
24#endif /* _ASM_X86_SEMBUF_H */
diff --git a/arch/x86/include/uapi/asm/setup.h b/arch/x86/include/uapi/asm/setup.h
deleted file mode 100644
index 79a9626b550..00000000000
--- a/arch/x86/include/uapi/asm/setup.h
+++ /dev/null
@@ -1 +0,0 @@
1/* */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de3..00000000000
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/shmbuf.h>
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
deleted file mode 100644
index d8b9f9081e8..00000000000
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,221 +0,0 @@
1#ifndef _UAPI_ASM_X86_SIGCONTEXT_H
2#define _UAPI_ASM_X86_SIGCONTEXT_H
3
4#include <linux/compiler.h>
5#include <linux/types.h>
6
7#define FP_XSTATE_MAGIC1 0x46505853U
8#define FP_XSTATE_MAGIC2 0x46505845U
9#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
10
11/*
12 * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
13 * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
14 * are used to extended the fpstate pointer in the sigcontext, which now
15 * includes the extended state information along with fpstate information.
16 *
17 * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
18 * area and FP_XSTATE_MAGIC2 at the end of memory layout
19 * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
20 * extended state information in the memory layout pointed by the fpstate
21 * pointer in sigcontext.
22 */
23struct _fpx_sw_bytes {
24 __u32 magic1; /* FP_XSTATE_MAGIC1 */
25 __u32 extended_size; /* total size of the layout referred by
26 * fpstate pointer in the sigcontext.
27 */
28 __u64 xstate_bv;
29 /* feature bit mask (including fp/sse/extended
30 * state) that is present in the memory
31 * layout.
32 */
33 __u32 xstate_size; /* actual xsave state size, based on the
34 * features saved in the layout.
35 * 'extended_size' will be greater than
36 * 'xstate_size'.
37 */
38 __u32 padding[7]; /* for future use. */
39};
40
41#ifdef __i386__
42/*
43 * As documented in the iBCS2 standard..
44 *
45 * The first part of "struct _fpstate" is just the normal i387
46 * hardware setup, the extra "status" word is used to save the
47 * coprocessor status word before entering the handler.
48 *
49 * Pentium III FXSR, SSE support
50 * Gareth Hughes <gareth@valinux.com>, May 2000
51 *
52 * The FPU state data structure has had to grow to accommodate the
53 * extended FPU state required by the Streaming SIMD Extensions.
54 * There is no documented standard to accomplish this at the moment.
55 */
56struct _fpreg {
57 unsigned short significand[4];
58 unsigned short exponent;
59};
60
61struct _fpxreg {
62 unsigned short significand[4];
63 unsigned short exponent;
64 unsigned short padding[3];
65};
66
67struct _xmmreg {
68 unsigned long element[4];
69};
70
71struct _fpstate {
72 /* Regular FPU environment */
73 unsigned long cw;
74 unsigned long sw;
75 unsigned long tag;
76 unsigned long ipoff;
77 unsigned long cssel;
78 unsigned long dataoff;
79 unsigned long datasel;
80 struct _fpreg _st[8];
81 unsigned short status;
82 unsigned short magic; /* 0xffff = regular FPU data only */
83
84 /* FXSR FPU environment */
85 unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
86 unsigned long mxcsr;
87 unsigned long reserved;
88 struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
89 struct _xmmreg _xmm[8];
90 unsigned long padding1[44];
91
92 union {
93 unsigned long padding2[12];
94 struct _fpx_sw_bytes sw_reserved; /* represents the extended
95 * state info */
96 };
97};
98
99#define X86_FXSR_MAGIC 0x0000
100
101#ifndef __KERNEL__
102/*
103 * User-space might still rely on the old definition:
104 */
105struct sigcontext {
106 unsigned short gs, __gsh;
107 unsigned short fs, __fsh;
108 unsigned short es, __esh;
109 unsigned short ds, __dsh;
110 unsigned long edi;
111 unsigned long esi;
112 unsigned long ebp;
113 unsigned long esp;
114 unsigned long ebx;
115 unsigned long edx;
116 unsigned long ecx;
117 unsigned long eax;
118 unsigned long trapno;
119 unsigned long err;
120 unsigned long eip;
121 unsigned short cs, __csh;
122 unsigned long eflags;
123 unsigned long esp_at_signal;
124 unsigned short ss, __ssh;
125 struct _fpstate __user *fpstate;
126 unsigned long oldmask;
127 unsigned long cr2;
128};
129#endif /* !__KERNEL__ */
130
131#else /* __i386__ */
132
133/* FXSAVE frame */
134/* Note: reserved1/2 may someday contain valuable data. Always save/restore
135 them when you change signal frames. */
136struct _fpstate {
137 __u16 cwd;
138 __u16 swd;
139 __u16 twd; /* Note this is not the same as the
140 32bit/x87/FSAVE twd */
141 __u16 fop;
142 __u64 rip;
143 __u64 rdp;
144 __u32 mxcsr;
145 __u32 mxcsr_mask;
146 __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
147 __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
148 __u32 reserved2[12];
149 union {
150 __u32 reserved3[12];
151 struct _fpx_sw_bytes sw_reserved; /* represents the extended
152 * state information */
153 };
154};
155
156#ifndef __KERNEL__
157/*
158 * User-space might still rely on the old definition:
159 */
160struct sigcontext {
161 __u64 r8;
162 __u64 r9;
163 __u64 r10;
164 __u64 r11;
165 __u64 r12;
166 __u64 r13;
167 __u64 r14;
168 __u64 r15;
169 __u64 rdi;
170 __u64 rsi;
171 __u64 rbp;
172 __u64 rbx;
173 __u64 rdx;
174 __u64 rax;
175 __u64 rcx;
176 __u64 rsp;
177 __u64 rip;
178 __u64 eflags; /* RFLAGS */
179 __u16 cs;
180 __u16 gs;
181 __u16 fs;
182 __u16 __pad0;
183 __u64 err;
184 __u64 trapno;
185 __u64 oldmask;
186 __u64 cr2;
187 struct _fpstate __user *fpstate; /* zero when no FPU context */
188#ifdef __ILP32__
189 __u32 __fpstate_pad;
190#endif
191 __u64 reserved1[8];
192};
193#endif /* !__KERNEL__ */
194
195#endif /* !__i386__ */
196
197struct _xsave_hdr {
198 __u64 xstate_bv;
199 __u64 reserved1[2];
200 __u64 reserved2[5];
201};
202
203struct _ymmh_state {
204 /* 16 * 16 bytes for each YMMH-reg */
205 __u32 ymmh_space[64];
206};
207
208/*
209 * Extended state pointed by the fpstate pointer in the sigcontext.
210 * In addition to the fpstate, information encoded in the xstate_hdr
211 * indicates the presence of other extended state information
212 * supported by the processor and OS.
213 */
214struct _xstate {
215 struct _fpstate fpstate;
216 struct _xsave_hdr xstate_hdr;
217 struct _ymmh_state ymmh;
218 /* new processor state extensions go here */
219};
220
221#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */
diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h
deleted file mode 100644
index ad1478c4ae1..00000000000
--- a/arch/x86/include/uapi/asm/sigcontext32.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _ASM_X86_SIGCONTEXT32_H
2#define _ASM_X86_SIGCONTEXT32_H
3
4#include <linux/types.h>
5
6/* signal context for 32bit programs. */
7
8#define X86_FXSR_MAGIC 0x0000
9
10struct _fpreg {
11 unsigned short significand[4];
12 unsigned short exponent;
13};
14
15struct _fpxreg {
16 unsigned short significand[4];
17 unsigned short exponent;
18 unsigned short padding[3];
19};
20
21struct _xmmreg {
22 __u32 element[4];
23};
24
25/* FSAVE frame with extensions */
26struct _fpstate_ia32 {
27 /* Regular FPU environment */
28 __u32 cw;
29 __u32 sw;
30 __u32 tag; /* not compatible to 64bit twd */
31 __u32 ipoff;
32 __u32 cssel;
33 __u32 dataoff;
34 __u32 datasel;
35 struct _fpreg _st[8];
36 unsigned short status;
37 unsigned short magic; /* 0xffff = regular FPU data only */
38
39 /* FXSR FPU environment */
40 __u32 _fxsr_env[6];
41 __u32 mxcsr;
42 __u32 reserved;
43 struct _fpxreg _fxsr_st[8];
44 struct _xmmreg _xmm[8]; /* It's actually 16 */
45 __u32 padding[44];
46 union {
47 __u32 padding2[12];
48 struct _fpx_sw_bytes sw_reserved;
49 };
50};
51
52struct sigcontext_ia32 {
53 unsigned short gs, __gsh;
54 unsigned short fs, __fsh;
55 unsigned short es, __esh;
56 unsigned short ds, __dsh;
57 unsigned int di;
58 unsigned int si;
59 unsigned int bp;
60 unsigned int sp;
61 unsigned int bx;
62 unsigned int dx;
63 unsigned int cx;
64 unsigned int ax;
65 unsigned int trapno;
66 unsigned int err;
67 unsigned int ip;
68 unsigned short cs, __csh;
69 unsigned int flags;
70 unsigned int sp_at_signal;
71 unsigned short ss, __ssh;
72 unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
73 unsigned int oldmask;
74 unsigned int cr2;
75};
76
77#endif /* _ASM_X86_SIGCONTEXT32_H */
diff --git a/arch/x86/include/uapi/asm/siginfo.h b/arch/x86/include/uapi/asm/siginfo.h
deleted file mode 100644
index 34c47b3341c..00000000000
--- a/arch/x86/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _ASM_X86_SIGINFO_H
2#define _ASM_X86_SIGINFO_H
3
4#ifdef __x86_64__
5# ifdef __ILP32__ /* x32 */
6typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
7# define __ARCH_SI_CLOCK_T __kernel_si_clock_t
8# define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8)))
9# else /* x86-64 */
10# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
11# endif
12#endif
13
14#include <asm-generic/siginfo.h>
15
16#endif /* _ASM_X86_SIGINFO_H */
diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h
deleted file mode 100644
index aa7d6ae39e0..00000000000
--- a/arch/x86/include/uapi/asm/signal.h
+++ /dev/null
@@ -1,139 +0,0 @@
1#ifndef _UAPI_ASM_X86_SIGNAL_H
2#define _UAPI_ASM_X86_SIGNAL_H
3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#include <linux/time.h>
7#include <linux/compiler.h>
8
9/* Avoid too many header ordering problems. */
10struct siginfo;
11
12#ifndef __KERNEL__
13/* Here we must cater to libcs that poke about in kernel headers. */
14
15#define NSIG 32
16typedef unsigned long sigset_t;
17
18#endif /* __KERNEL__ */
19#endif /* __ASSEMBLY__ */
20
21
22#define SIGHUP 1
23#define SIGINT 2
24#define SIGQUIT 3
25#define SIGILL 4
26#define SIGTRAP 5
27#define SIGABRT 6
28#define SIGIOT 6
29#define SIGBUS 7
30#define SIGFPE 8
31#define SIGKILL 9
32#define SIGUSR1 10
33#define SIGSEGV 11
34#define SIGUSR2 12
35#define SIGPIPE 13
36#define SIGALRM 14
37#define SIGTERM 15
38#define SIGSTKFLT 16
39#define SIGCHLD 17
40#define SIGCONT 18
41#define SIGSTOP 19
42#define SIGTSTP 20
43#define SIGTTIN 21
44#define SIGTTOU 22
45#define SIGURG 23
46#define SIGXCPU 24
47#define SIGXFSZ 25
48#define SIGVTALRM 26
49#define SIGPROF 27
50#define SIGWINCH 28
51#define SIGIO 29
52#define SIGPOLL SIGIO
53/*
54#define SIGLOST 29
55*/
56#define SIGPWR 30
57#define SIGSYS 31
58#define SIGUNUSED 31
59
60/* These should not be considered constants from userland. */
61#define SIGRTMIN 32
62#define SIGRTMAX _NSIG
63
64/*
65 * SA_FLAGS values:
66 *
67 * SA_ONSTACK indicates that a registered stack_t will be used.
68 * SA_RESTART flag to get restarting signals (which were the default long ago)
69 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
70 * SA_RESETHAND clears the handler when the signal is delivered.
71 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
72 * SA_NODEFER prevents the current signal from being masked in the handler.
73 *
74 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
75 * Unix names RESETHAND and NODEFER respectively.
76 */
77#define SA_NOCLDSTOP 0x00000001u
78#define SA_NOCLDWAIT 0x00000002u
79#define SA_SIGINFO 0x00000004u
80#define SA_ONSTACK 0x08000000u
81#define SA_RESTART 0x10000000u
82#define SA_NODEFER 0x40000000u
83#define SA_RESETHAND 0x80000000u
84
85#define SA_NOMASK SA_NODEFER
86#define SA_ONESHOT SA_RESETHAND
87
88#define SA_RESTORER 0x04000000
89
90#define MINSIGSTKSZ 2048
91#define SIGSTKSZ 8192
92
93#include <asm-generic/signal-defs.h>
94
95#ifndef __ASSEMBLY__
96
97
98#ifdef __i386__
99# ifndef __KERNEL__
100/* Here we must cater to libcs that poke about in kernel headers. */
101
102struct sigaction {
103 union {
104 __sighandler_t _sa_handler;
105 void (*_sa_sigaction)(int, struct siginfo *, void *);
106 } _u;
107 sigset_t sa_mask;
108 unsigned long sa_flags;
109 void (*sa_restorer)(void);
110};
111
112#define sa_handler _u._sa_handler
113#define sa_sigaction _u._sa_sigaction
114
115# endif /* ! __KERNEL__ */
116#else /* __i386__ */
117
118struct sigaction {
119 __sighandler_t sa_handler;
120 unsigned long sa_flags;
121 __sigrestore_t sa_restorer;
122 sigset_t sa_mask; /* mask last for extensibility */
123};
124
125struct k_sigaction {
126 struct sigaction sa;
127};
128
129#endif /* !__i386__ */
130
131typedef struct sigaltstack {
132 void __user *ss_sp;
133 int ss_flags;
134 size_t ss_size;
135} stack_t;
136
137#endif /* __ASSEMBLY__ */
138
139#endif /* _UAPI_ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/uapi/asm/socket.h b/arch/x86/include/uapi/asm/socket.h
deleted file mode 100644
index 6b71384b9d8..00000000000
--- a/arch/x86/include/uapi/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/socket.h>
diff --git a/arch/x86/include/uapi/asm/sockios.h b/arch/x86/include/uapi/asm/sockios.h
deleted file mode 100644
index def6d4746ee..00000000000
--- a/arch/x86/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sockios.h>
diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h
deleted file mode 100644
index 7b3ddc34858..00000000000
--- a/arch/x86/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,135 +0,0 @@
1#ifndef _ASM_X86_STAT_H
2#define _ASM_X86_STAT_H
3
4#define STAT_HAVE_NSEC 1
5
6#ifdef __i386__
7struct stat {
8 unsigned long st_dev;
9 unsigned long st_ino;
10 unsigned short st_mode;
11 unsigned short st_nlink;
12 unsigned short st_uid;
13 unsigned short st_gid;
14 unsigned long st_rdev;
15 unsigned long st_size;
16 unsigned long st_blksize;
17 unsigned long st_blocks;
18 unsigned long st_atime;
19 unsigned long st_atime_nsec;
20 unsigned long st_mtime;
21 unsigned long st_mtime_nsec;
22 unsigned long st_ctime;
23 unsigned long st_ctime_nsec;
24 unsigned long __unused4;
25 unsigned long __unused5;
26};
27
28/* We don't need to memset the whole thing just to initialize the padding */
29#define INIT_STRUCT_STAT_PADDING(st) do { \
30 st.__unused4 = 0; \
31 st.__unused5 = 0; \
32} while (0)
33
34#define STAT64_HAS_BROKEN_ST_INO 1
35
36/* This matches struct stat64 in glibc2.1, hence the absolutely
37 * insane amounts of padding around dev_t's.
38 */
39struct stat64 {
40 unsigned long long st_dev;
41 unsigned char __pad0[4];
42
43 unsigned long __st_ino;
44
45 unsigned int st_mode;
46 unsigned int st_nlink;
47
48 unsigned long st_uid;
49 unsigned long st_gid;
50
51 unsigned long long st_rdev;
52 unsigned char __pad3[4];
53
54 long long st_size;
55 unsigned long st_blksize;
56
57 /* Number 512-byte blocks allocated. */
58 unsigned long long st_blocks;
59
60 unsigned long st_atime;
61 unsigned long st_atime_nsec;
62
63 unsigned long st_mtime;
64 unsigned int st_mtime_nsec;
65
66 unsigned long st_ctime;
67 unsigned long st_ctime_nsec;
68
69 unsigned long long st_ino;
70};
71
72/* We don't need to memset the whole thing just to initialize the padding */
73#define INIT_STRUCT_STAT64_PADDING(st) do { \
74 memset(&st.__pad0, 0, sizeof(st.__pad0)); \
75 memset(&st.__pad3, 0, sizeof(st.__pad3)); \
76} while (0)
77
78#else /* __i386__ */
79
80struct stat {
81 unsigned long st_dev;
82 unsigned long st_ino;
83 unsigned long st_nlink;
84
85 unsigned int st_mode;
86 unsigned int st_uid;
87 unsigned int st_gid;
88 unsigned int __pad0;
89 unsigned long st_rdev;
90 long st_size;
91 long st_blksize;
92 long st_blocks; /* Number 512-byte blocks allocated. */
93
94 unsigned long st_atime;
95 unsigned long st_atime_nsec;
96 unsigned long st_mtime;
97 unsigned long st_mtime_nsec;
98 unsigned long st_ctime;
99 unsigned long st_ctime_nsec;
100 long __unused[3];
101};
102
103/* We don't need to memset the whole thing just to initialize the padding */
104#define INIT_STRUCT_STAT_PADDING(st) do { \
105 st.__pad0 = 0; \
106 st.__unused[0] = 0; \
107 st.__unused[1] = 0; \
108 st.__unused[2] = 0; \
109} while (0)
110
111#endif
112
113/* for 32bit emulation and 32 bit kernels */
114struct __old_kernel_stat {
115 unsigned short st_dev;
116 unsigned short st_ino;
117 unsigned short st_mode;
118 unsigned short st_nlink;
119 unsigned short st_uid;
120 unsigned short st_gid;
121 unsigned short st_rdev;
122#ifdef __i386__
123 unsigned long st_size;
124 unsigned long st_atime;
125 unsigned long st_mtime;
126 unsigned long st_ctime;
127#else
128 unsigned int st_size;
129 unsigned int st_atime;
130 unsigned int st_mtime;
131 unsigned int st_ctime;
132#endif
133};
134
135#endif /* _ASM_X86_STAT_H */
diff --git a/arch/x86/include/uapi/asm/statfs.h b/arch/x86/include/uapi/asm/statfs.h
deleted file mode 100644
index 2d0adbf99a8..00000000000
--- a/arch/x86/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_STATFS_H
2#define _ASM_X86_STATFS_H
3
4/*
5 * We need compat_statfs64 to be packed, because the i386 ABI won't
6 * add padding at the end to bring it to a multiple of 8 bytes, but
7 * the x86_64 ABI will.
8 */
9#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
10
11#include <asm-generic/statfs.h>
12#endif /* _ASM_X86_STATFS_H */
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
deleted file mode 100644
index b5d7640abc5..00000000000
--- a/arch/x86/include/uapi/asm/svm.h
+++ /dev/null
@@ -1,132 +0,0 @@
1#ifndef _UAPI__SVM_H
2#define _UAPI__SVM_H
3
4#define SVM_EXIT_READ_CR0 0x000
5#define SVM_EXIT_READ_CR3 0x003
6#define SVM_EXIT_READ_CR4 0x004
7#define SVM_EXIT_READ_CR8 0x008
8#define SVM_EXIT_WRITE_CR0 0x010
9#define SVM_EXIT_WRITE_CR3 0x013
10#define SVM_EXIT_WRITE_CR4 0x014
11#define SVM_EXIT_WRITE_CR8 0x018
12#define SVM_EXIT_READ_DR0 0x020
13#define SVM_EXIT_READ_DR1 0x021
14#define SVM_EXIT_READ_DR2 0x022
15#define SVM_EXIT_READ_DR3 0x023
16#define SVM_EXIT_READ_DR4 0x024
17#define SVM_EXIT_READ_DR5 0x025
18#define SVM_EXIT_READ_DR6 0x026
19#define SVM_EXIT_READ_DR7 0x027
20#define SVM_EXIT_WRITE_DR0 0x030
21#define SVM_EXIT_WRITE_DR1 0x031
22#define SVM_EXIT_WRITE_DR2 0x032
23#define SVM_EXIT_WRITE_DR3 0x033
24#define SVM_EXIT_WRITE_DR4 0x034
25#define SVM_EXIT_WRITE_DR5 0x035
26#define SVM_EXIT_WRITE_DR6 0x036
27#define SVM_EXIT_WRITE_DR7 0x037
28#define SVM_EXIT_EXCP_BASE 0x040
29#define SVM_EXIT_INTR 0x060
30#define SVM_EXIT_NMI 0x061
31#define SVM_EXIT_SMI 0x062
32#define SVM_EXIT_INIT 0x063
33#define SVM_EXIT_VINTR 0x064
34#define SVM_EXIT_CR0_SEL_WRITE 0x065
35#define SVM_EXIT_IDTR_READ 0x066
36#define SVM_EXIT_GDTR_READ 0x067
37#define SVM_EXIT_LDTR_READ 0x068
38#define SVM_EXIT_TR_READ 0x069
39#define SVM_EXIT_IDTR_WRITE 0x06a
40#define SVM_EXIT_GDTR_WRITE 0x06b
41#define SVM_EXIT_LDTR_WRITE 0x06c
42#define SVM_EXIT_TR_WRITE 0x06d
43#define SVM_EXIT_RDTSC 0x06e
44#define SVM_EXIT_RDPMC 0x06f
45#define SVM_EXIT_PUSHF 0x070
46#define SVM_EXIT_POPF 0x071
47#define SVM_EXIT_CPUID 0x072
48#define SVM_EXIT_RSM 0x073
49#define SVM_EXIT_IRET 0x074
50#define SVM_EXIT_SWINT 0x075
51#define SVM_EXIT_INVD 0x076
52#define SVM_EXIT_PAUSE 0x077
53#define SVM_EXIT_HLT 0x078
54#define SVM_EXIT_INVLPG 0x079
55#define SVM_EXIT_INVLPGA 0x07a
56#define SVM_EXIT_IOIO 0x07b
57#define SVM_EXIT_MSR 0x07c
58#define SVM_EXIT_TASK_SWITCH 0x07d
59#define SVM_EXIT_FERR_FREEZE 0x07e
60#define SVM_EXIT_SHUTDOWN 0x07f
61#define SVM_EXIT_VMRUN 0x080
62#define SVM_EXIT_VMMCALL 0x081
63#define SVM_EXIT_VMLOAD 0x082
64#define SVM_EXIT_VMSAVE 0x083
65#define SVM_EXIT_STGI 0x084
66#define SVM_EXIT_CLGI 0x085
67#define SVM_EXIT_SKINIT 0x086
68#define SVM_EXIT_RDTSCP 0x087
69#define SVM_EXIT_ICEBP 0x088
70#define SVM_EXIT_WBINVD 0x089
71#define SVM_EXIT_MONITOR 0x08a
72#define SVM_EXIT_MWAIT 0x08b
73#define SVM_EXIT_MWAIT_COND 0x08c
74#define SVM_EXIT_XSETBV 0x08d
75#define SVM_EXIT_NPF 0x400
76
77#define SVM_EXIT_ERR -1
78
79#define SVM_EXIT_REASONS \
80 { SVM_EXIT_READ_CR0, "read_cr0" }, \
81 { SVM_EXIT_READ_CR3, "read_cr3" }, \
82 { SVM_EXIT_READ_CR4, "read_cr4" }, \
83 { SVM_EXIT_READ_CR8, "read_cr8" }, \
84 { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
85 { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
86 { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
87 { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
88 { SVM_EXIT_READ_DR0, "read_dr0" }, \
89 { SVM_EXIT_READ_DR1, "read_dr1" }, \
90 { SVM_EXIT_READ_DR2, "read_dr2" }, \
91 { SVM_EXIT_READ_DR3, "read_dr3" }, \
92 { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
93 { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
94 { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
95 { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
96 { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
97 { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
98 { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
99 { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
100 { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
101 { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
102 { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
103 { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
104 { SVM_EXIT_INTR, "interrupt" }, \
105 { SVM_EXIT_NMI, "nmi" }, \
106 { SVM_EXIT_SMI, "smi" }, \
107 { SVM_EXIT_INIT, "init" }, \
108 { SVM_EXIT_VINTR, "vintr" }, \
109 { SVM_EXIT_CPUID, "cpuid" }, \
110 { SVM_EXIT_INVD, "invd" }, \
111 { SVM_EXIT_HLT, "hlt" }, \
112 { SVM_EXIT_INVLPG, "invlpg" }, \
113 { SVM_EXIT_INVLPGA, "invlpga" }, \
114 { SVM_EXIT_IOIO, "io" }, \
115 { SVM_EXIT_MSR, "msr" }, \
116 { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
117 { SVM_EXIT_SHUTDOWN, "shutdown" }, \
118 { SVM_EXIT_VMRUN, "vmrun" }, \
119 { SVM_EXIT_VMMCALL, "hypercall" }, \
120 { SVM_EXIT_VMLOAD, "vmload" }, \
121 { SVM_EXIT_VMSAVE, "vmsave" }, \
122 { SVM_EXIT_STGI, "stgi" }, \
123 { SVM_EXIT_CLGI, "clgi" }, \
124 { SVM_EXIT_SKINIT, "skinit" }, \
125 { SVM_EXIT_WBINVD, "wbinvd" }, \
126 { SVM_EXIT_MONITOR, "monitor" }, \
127 { SVM_EXIT_MWAIT, "mwait" }, \
128 { SVM_EXIT_XSETBV, "xsetbv" }, \
129 { SVM_EXIT_NPF, "npf" }
130
131
132#endif /* _UAPI__SVM_H */
diff --git a/arch/x86/include/uapi/asm/swab.h b/arch/x86/include/uapi/asm/swab.h
deleted file mode 100644
index 7f235c7105c..00000000000
--- a/arch/x86/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef _ASM_X86_SWAB_H
2#define _ASM_X86_SWAB_H
3
4#include <linux/types.h>
5#include <linux/compiler.h>
6
7static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
8{
9 asm("bswapl %0" : "=r" (val) : "0" (val));
10 return val;
11}
12#define __arch_swab32 __arch_swab32
13
14static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
15{
16#ifdef __i386__
17 union {
18 struct {
19 __u32 a;
20 __u32 b;
21 } s;
22 __u64 u;
23 } v;
24 v.u = val;
25 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
26 : "=r" (v.s.a), "=r" (v.s.b)
27 : "0" (v.s.a), "1" (v.s.b));
28 return v.u;
29#else /* __i386__ */
30 asm("bswapq %0" : "=r" (val) : "0" (val));
31 return val;
32#endif
33}
34#define __arch_swab64 __arch_swab64
35
36#endif /* _ASM_X86_SWAB_H */
diff --git a/arch/x86/include/uapi/asm/termbits.h b/arch/x86/include/uapi/asm/termbits.h
deleted file mode 100644
index 3935b106de7..00000000000
--- a/arch/x86/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termbits.h>
diff --git a/arch/x86/include/uapi/asm/termios.h b/arch/x86/include/uapi/asm/termios.h
deleted file mode 100644
index 280d78a9d96..00000000000
--- a/arch/x86/include/uapi/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termios.h>
diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h
deleted file mode 100644
index 8e8c23fef08..00000000000
--- a/arch/x86/include/uapi/asm/types.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_X86_TYPES_H
2#define _ASM_X86_TYPES_H
3
4#include <asm-generic/types.h>
5
6#endif /* _ASM_X86_TYPES_H */
diff --git a/arch/x86/include/uapi/asm/ucontext.h b/arch/x86/include/uapi/asm/ucontext.h
deleted file mode 100644
index b7c29c8017f..00000000000
--- a/arch/x86/include/uapi/asm/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_UCONTEXT_H
2#define _ASM_X86_UCONTEXT_H
3
4#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state
5 * information in the memory layout pointed
6 * by the fpstate pointer in the ucontext's
7 * sigcontext struct (uc_mcontext).
8 */
9
10#include <asm-generic/ucontext.h>
11
12#endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/uapi/asm/unistd.h b/arch/x86/include/uapi/asm/unistd.h
deleted file mode 100644
index a26df0d75cd..00000000000
--- a/arch/x86/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _UAPI_ASM_X86_UNISTD_H
2#define _UAPI_ASM_X86_UNISTD_H
3
4/* x32 syscall flag bit */
5#define __X32_SYSCALL_BIT 0x40000000
6
7#ifndef __KERNEL__
8# ifdef __i386__
9# include <asm/unistd_32.h>
10# elif defined(__ILP32__)
11# include <asm/unistd_x32.h>
12# else
13# include <asm/unistd_64.h>
14# endif
15#endif
16
17#endif /* _UAPI_ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/uapi/asm/vm86.h b/arch/x86/include/uapi/asm/vm86.h
deleted file mode 100644
index e0b243e9d85..00000000000
--- a/arch/x86/include/uapi/asm/vm86.h
+++ /dev/null
@@ -1,129 +0,0 @@
1#ifndef _UAPI_ASM_X86_VM86_H
2#define _UAPI_ASM_X86_VM86_H
3
4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
6 * the Pentium uses them. Linux will return from vm86 mode when both
7 * VIF and VIP is set.
8 *
9 * On a Pentium, we could probably optimize the virtual flags directly
10 * in the eflags register instead of doing it "by hand" in vflags...
11 *
12 * Linus
13 */
14
15#include <asm/processor-flags.h>
16
17#define BIOSSEG 0x0f000
18
19#define CPU_086 0
20#define CPU_186 1
21#define CPU_286 2
22#define CPU_386 3
23#define CPU_486 4
24#define CPU_586 5
25
26/*
27 * Return values for the 'vm86()' system call
28 */
29#define VM86_TYPE(retval) ((retval) & 0xff)
30#define VM86_ARG(retval) ((retval) >> 8)
31
32#define VM86_SIGNAL 0 /* return due to signal */
33#define VM86_UNKNOWN 1 /* unhandled GP fault
34 - IO-instruction or similar */
35#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
36#define VM86_STI 3 /* sti/popf/iret instruction enabled
37 virtual interrupts */
38
39/*
40 * Additional return values when invoking new vm86()
41 */
42#define VM86_PICRETURN 4 /* return due to pending PIC request */
43#define VM86_TRAP 6 /* return due to DOS-debugger request */
44
45/*
46 * function codes when invoking new vm86()
47 */
48#define VM86_PLUS_INSTALL_CHECK 0
49#define VM86_ENTER 1
50#define VM86_ENTER_NO_BYPASS 2
51#define VM86_REQUEST_IRQ 3
52#define VM86_FREE_IRQ 4
53#define VM86_GET_IRQ_BITS 5
54#define VM86_GET_AND_RESET_IRQ 6
55
56/*
57 * This is the stack-layout seen by the user space program when we have
58 * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
59 * is 'kernel_vm86_regs' (see below).
60 */
61
62struct vm86_regs {
63/*
64 * normal regs, with special meaning for the segment descriptors..
65 */
66 long ebx;
67 long ecx;
68 long edx;
69 long esi;
70 long edi;
71 long ebp;
72 long eax;
73 long __null_ds;
74 long __null_es;
75 long __null_fs;
76 long __null_gs;
77 long orig_eax;
78 long eip;
79 unsigned short cs, __csh;
80 long eflags;
81 long esp;
82 unsigned short ss, __ssh;
83/*
84 * these are specific to v86 mode:
85 */
86 unsigned short es, __esh;
87 unsigned short ds, __dsh;
88 unsigned short fs, __fsh;
89 unsigned short gs, __gsh;
90};
91
92struct revectored_struct {
93 unsigned long __map[8]; /* 256 bits */
94};
95
96struct vm86_struct {
97 struct vm86_regs regs;
98 unsigned long flags;
99 unsigned long screen_bitmap;
100 unsigned long cpu_type;
101 struct revectored_struct int_revectored;
102 struct revectored_struct int21_revectored;
103};
104
105/*
106 * flags masks
107 */
108#define VM86_SCREEN_BITMAP 0x0001
109
110struct vm86plus_info_struct {
111 unsigned long force_return_for_pic:1;
112 unsigned long vm86dbg_active:1; /* for debugger */
113 unsigned long vm86dbg_TFpendig:1; /* for debugger */
114 unsigned long unused:28;
115 unsigned long is_vm86pus:1; /* for vm86 internal use */
116 unsigned char vm86dbg_intxxtab[32]; /* for debugger */
117};
118struct vm86plus_struct {
119 struct vm86_regs regs;
120 unsigned long flags;
121 unsigned long screen_bitmap;
122 unsigned long cpu_type;
123 struct revectored_struct int_revectored;
124 struct revectored_struct int21_revectored;
125 struct vm86plus_info_struct vm86plus;
126};
127
128
129#endif /* _UAPI_ASM_X86_VM86_H */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
deleted file mode 100644
index 979d03bce13..00000000000
--- a/arch/x86/include/uapi/asm/vmx.h
+++ /dev/null
@@ -1,109 +0,0 @@
1/*
2 * vmx.h: VMX Architecture related definitions
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * A few random additions are:
19 * Copyright (C) 2006 Qumranet
20 * Avi Kivity <avi@qumranet.com>
21 * Yaniv Kamay <yaniv@qumranet.com>
22 *
23 */
#ifndef _UAPIVMX_H
#define _UAPIVMX_H


/*
 * Set in the exit-reason field when the VM-exit was caused by a failed
 * VM entry rather than by guest execution.
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000

/* Basic VM-exit reason codes (low 16 bits of the exit reason). */
#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1
#define EXIT_REASON_TRIPLE_FAULT        2

#define EXIT_REASON_PENDING_INTERRUPT   7
#define EXIT_REASON_NMI_WINDOW          8
#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVD                13
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMOFF               26
#define EXIT_REASON_VMON                27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_INVALID_STATE       33
#define EXIT_REASON_MWAIT_INSTRUCTION   36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION   40
#define EXIT_REASON_MCE_DURING_VMENTRY  41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS         44
#define EXIT_REASON_EPT_VIOLATION       48
#define EXIT_REASON_EPT_MISCONFIG       49
#define EXIT_REASON_WBINVD              54
#define EXIT_REASON_XSETBV              55
#define EXIT_REASON_INVPCID             58

/*
 * Code/name pairs for tracing.  Previously INVD, INVALID_STATE, XSETBV
 * and INVPCID were #defined above but missing from this table, so those
 * exits decoded as raw numbers; they are now included.  Keep this table
 * in sync with the EXIT_REASON_* list.
 */
#define VMX_EXIT_REASONS \
	{ EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
	{ EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
	{ EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
	{ EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
	{ EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
	{ EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
	{ EXIT_REASON_CPUID,                 "CPUID" }, \
	{ EXIT_REASON_HLT,                   "HLT" }, \
	{ EXIT_REASON_INVD,                  "INVD" }, \
	{ EXIT_REASON_INVLPG,                "INVLPG" }, \
	{ EXIT_REASON_RDPMC,                 "RDPMC" }, \
	{ EXIT_REASON_RDTSC,                 "RDTSC" }, \
	{ EXIT_REASON_VMCALL,                "VMCALL" }, \
	{ EXIT_REASON_VMCLEAR,               "VMCLEAR" }, \
	{ EXIT_REASON_VMLAUNCH,              "VMLAUNCH" }, \
	{ EXIT_REASON_VMPTRLD,               "VMPTRLD" }, \
	{ EXIT_REASON_VMPTRST,               "VMPTRST" }, \
	{ EXIT_REASON_VMREAD,                "VMREAD" }, \
	{ EXIT_REASON_VMRESUME,              "VMRESUME" }, \
	{ EXIT_REASON_VMWRITE,               "VMWRITE" }, \
	{ EXIT_REASON_VMOFF,                 "VMOFF" }, \
	{ EXIT_REASON_VMON,                  "VMON" }, \
	{ EXIT_REASON_CR_ACCESS,             "CR_ACCESS" }, \
	{ EXIT_REASON_DR_ACCESS,             "DR_ACCESS" }, \
	{ EXIT_REASON_IO_INSTRUCTION,        "IO_INSTRUCTION" }, \
	{ EXIT_REASON_MSR_READ,              "MSR_READ" }, \
	{ EXIT_REASON_MSR_WRITE,             "MSR_WRITE" }, \
	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
	{ EXIT_REASON_MWAIT_INSTRUCTION,     "MWAIT_INSTRUCTION" }, \
	{ EXIT_REASON_MONITOR_INSTRUCTION,   "MONITOR_INSTRUCTION" }, \
	{ EXIT_REASON_PAUSE_INSTRUCTION,     "PAUSE_INSTRUCTION" }, \
	{ EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
	{ EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
	{ EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
	{ EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
	{ EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
	{ EXIT_REASON_WBINVD,                "WBINVD" }, \
	{ EXIT_REASON_XSETBV,                "XSETBV" }, \
	{ EXIT_REASON_INVPCID,               "INVPCID" }


#endif /* _UAPIVMX_H */
diff --git a/arch/x86/include/uapi/asm/vsyscall.h b/arch/x86/include/uapi/asm/vsyscall.h
deleted file mode 100644
index 85dc1b3825a..00000000000
--- a/arch/x86/include/uapi/asm/vsyscall.h
+++ /dev/null
@@ -1,17 +0,0 @@
#ifndef _UAPI_ASM_X86_VSYSCALL_H
#define _UAPI_ASM_X86_VSYSCALL_H

/* Slot indices of the legacy fixed-address vsyscall entry points. */
enum vsyscall_num {
	__NR_vgettimeofday,
	__NR_vtime,
	__NR_vgetcpu,
};

/* Fixed mapping near the top of the address space: -10UL << 20 is -10 MiB. */
#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024		/* per-slot stride in bytes, see VSYSCALL_ADDR() */
#define VSYSCALL_END (-2UL << 20)
#define VSYSCALL_MAPPED_PAGES 1
/* Address of entry vsyscall_nr: base plus one VSYSCALL_SIZE stride per slot. */
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))


#endif /* _UAPI_ASM_X86_VSYSCALL_H */