aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include/asm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--arch/x86/include/asm/apic.h23
-rw-r--r--arch/x86/include/asm/apicdef.h2
-rw-r--r--arch/x86/include/asm/asm.h38
-rw-r--r--arch/x86/include/asm/atomic64_32.h10
-rw-r--r--arch/x86/include/asm/boot.h2
-rw-r--r--arch/x86/include/asm/bootparam.h3
-rw-r--r--arch/x86/include/asm/compat.h2
-rw-r--r--arch/x86/include/asm/current.h2
-rw-r--r--arch/x86/include/asm/desc.h1
-rw-r--r--arch/x86/include/asm/device.h4
-rw-r--r--arch/x86/include/asm/dma-mapping.h9
-rw-r--r--arch/x86/include/asm/fpu-internal.h6
-rw-r--r--arch/x86/include/asm/hardirq.h9
-rw-r--r--arch/x86/include/asm/ia32.h6
-rw-r--r--arch/x86/include/asm/io_apic.h35
-rw-r--r--arch/x86/include/asm/irq_regs.h4
-rw-r--r--arch/x86/include/asm/irq_remapping.h118
-rw-r--r--arch/x86/include/asm/kbdleds.h17
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h5
-rw-r--r--arch/x86/include/asm/kvm_para.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h12
-rw-r--r--arch/x86/include/asm/mmzone_32.h6
-rw-r--r--arch/x86/include/asm/msr.h9
-rw-r--r--arch/x86/include/asm/nmi.h22
-rw-r--r--arch/x86/include/asm/nops.h4
-rw-r--r--arch/x86/include/asm/page_32_types.h4
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/paravirt.h6
-rw-r--r--arch/x86/include/asm/percpu.h24
-rw-r--r--arch/x86/include/asm/processor.h5
-rw-r--r--arch/x86/include/asm/segment.h4
-rw-r--r--arch/x86/include/asm/smp.h15
-rw-r--r--arch/x86/include/asm/spinlock.h2
-rw-r--r--arch/x86/include/asm/stackprotector.h4
-rw-r--r--arch/x86/include/asm/stat.h21
-rw-r--r--arch/x86/include/asm/syscall.h27
-rw-r--r--arch/x86/include/asm/thread_info.h23
-rw-r--r--arch/x86/include/asm/tlbflush.h10
-rw-r--r--arch/x86/include/asm/topology.h38
-rw-r--r--arch/x86/include/asm/uaccess.h25
-rw-r--r--arch/x86/include/asm/word-at-a-time.h33
-rw-r--r--arch/x86/include/asm/x86_init.h9
-rw-r--r--arch/x86/include/asm/xsave.h10
44 files changed, 388 insertions, 229 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index d85410171260..eaff4790ed96 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -138,6 +138,11 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
138 wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); 138 wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
139} 139}
140 140
141static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
142{
143 wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
144}
145
141static inline u32 native_apic_msr_read(u32 reg) 146static inline u32 native_apic_msr_read(u32 reg)
142{ 147{
143 u64 msr; 148 u64 msr;
@@ -351,6 +356,14 @@ struct apic {
351 /* apic ops */ 356 /* apic ops */
352 u32 (*read)(u32 reg); 357 u32 (*read)(u32 reg);
353 void (*write)(u32 reg, u32 v); 358 void (*write)(u32 reg, u32 v);
359 /*
360 * ->eoi_write() has the same signature as ->write().
361 *
362 * Drivers can support both ->eoi_write() and ->write() by passing the same
363 * callback value. Kernel can override ->eoi_write() and fall back
364 * on write for EOI.
365 */
366 void (*eoi_write)(u32 reg, u32 v);
354 u64 (*icr_read)(void); 367 u64 (*icr_read)(void);
355 void (*icr_write)(u32 low, u32 high); 368 void (*icr_write)(u32 low, u32 high);
356 void (*wait_icr_idle)(void); 369 void (*wait_icr_idle)(void);
@@ -426,6 +439,11 @@ static inline void apic_write(u32 reg, u32 val)
426 apic->write(reg, val); 439 apic->write(reg, val);
427} 440}
428 441
442static inline void apic_eoi(void)
443{
444 apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
445}
446
429static inline u64 apic_icr_read(void) 447static inline u64 apic_icr_read(void)
430{ 448{
431 return apic->icr_read(); 449 return apic->icr_read();
@@ -450,6 +468,7 @@ static inline u32 safe_apic_wait_icr_idle(void)
450 468
451static inline u32 apic_read(u32 reg) { return 0; } 469static inline u32 apic_read(u32 reg) { return 0; }
452static inline void apic_write(u32 reg, u32 val) { } 470static inline void apic_write(u32 reg, u32 val) { }
471static inline void apic_eoi(void) { }
453static inline u64 apic_icr_read(void) { return 0; } 472static inline u64 apic_icr_read(void) { return 0; }
454static inline void apic_icr_write(u32 low, u32 high) { } 473static inline void apic_icr_write(u32 low, u32 high) { }
455static inline void apic_wait_icr_idle(void) { } 474static inline void apic_wait_icr_idle(void) { }
@@ -463,9 +482,7 @@ static inline void ack_APIC_irq(void)
463 * ack_APIC_irq() actually gets compiled as a single instruction 482 * ack_APIC_irq() actually gets compiled as a single instruction
464 * ... yummie. 483 * ... yummie.
465 */ 484 */
466 485 apic_eoi();
467 /* Docs say use 0 for future compatibility */
468 apic_write(APIC_EOI, 0);
469} 486}
470 487
471static inline unsigned default_get_apic_id(unsigned long x) 488static inline unsigned default_get_apic_id(unsigned long x)
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 134bba00df09..c46bb99d5fb2 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -37,7 +37,7 @@
37#define APIC_ARBPRI_MASK 0xFFu 37#define APIC_ARBPRI_MASK 0xFFu
38#define APIC_PROCPRI 0xA0 38#define APIC_PROCPRI 0xA0
39#define APIC_EOI 0xB0 39#define APIC_EOI 0xB0
40#define APIC_EIO_ACK 0x0 40#define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */
41#define APIC_RRR 0xC0 41#define APIC_RRR 0xC0
42#define APIC_LDR 0xD0 42#define APIC_LDR 0xD0
43#define APIC_LDR_MASK (0xFFu << 24) 43#define APIC_LDR_MASK (0xFFu << 24)
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 9412d6558c88..1c2d247f65ce 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -4,11 +4,9 @@
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
6# define __ASM_FORM_COMMA(x) x, 6# define __ASM_FORM_COMMA(x) x,
7# define __ASM_EX_SEC .section __ex_table, "a"
8#else 7#else
9# define __ASM_FORM(x) " " #x " " 8# define __ASM_FORM(x) " " #x " "
10# define __ASM_FORM_COMMA(x) " " #x "," 9# define __ASM_FORM_COMMA(x) " " #x ","
11# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
12#endif 10#endif
13 11
14#ifdef CONFIG_X86_32 12#ifdef CONFIG_X86_32
@@ -42,17 +40,33 @@
42 40
43/* Exception table entry */ 41/* Exception table entry */
44#ifdef __ASSEMBLY__ 42#ifdef __ASSEMBLY__
45# define _ASM_EXTABLE(from,to) \ 43# define _ASM_EXTABLE(from,to) \
46 __ASM_EX_SEC ; \ 44 .pushsection "__ex_table","a" ; \
47 _ASM_ALIGN ; \ 45 .balign 8 ; \
48 _ASM_PTR from , to ; \ 46 .long (from) - . ; \
49 .previous 47 .long (to) - . ; \
48 .popsection
49
50# define _ASM_EXTABLE_EX(from,to) \
51 .pushsection "__ex_table","a" ; \
52 .balign 8 ; \
53 .long (from) - . ; \
54 .long (to) - . + 0x7ffffff0 ; \
55 .popsection
50#else 56#else
51# define _ASM_EXTABLE(from,to) \ 57# define _ASM_EXTABLE(from,to) \
52 __ASM_EX_SEC \ 58 " .pushsection \"__ex_table\",\"a\"\n" \
53 _ASM_ALIGN "\n" \ 59 " .balign 8\n" \
54 _ASM_PTR #from "," #to "\n" \ 60 " .long (" #from ") - .\n" \
55 " .previous\n" 61 " .long (" #to ") - .\n" \
62 " .popsection\n"
63
64# define _ASM_EXTABLE_EX(from,to) \
65 " .pushsection \"__ex_table\",\"a\"\n" \
66 " .balign 8\n" \
67 " .long (" #from ") - .\n" \
68 " .long (" #to ") - . + 0x7ffffff0\n" \
69 " .popsection\n"
56#endif 70#endif
57 71
58#endif /* _ASM_X86_ASM_H */ 72#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 198119910da5..b154de75c90c 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -63,7 +63,7 @@ ATOMIC64_DECL(add_unless);
63 63
64/** 64/**
65 * atomic64_cmpxchg - cmpxchg atomic64 variable 65 * atomic64_cmpxchg - cmpxchg atomic64 variable
66 * @p: pointer to type atomic64_t 66 * @v: pointer to type atomic64_t
67 * @o: expected value 67 * @o: expected value
68 * @n: new value 68 * @n: new value
69 * 69 *
@@ -98,7 +98,7 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
98/** 98/**
99 * atomic64_set - set atomic64 variable 99 * atomic64_set - set atomic64 variable
100 * @v: pointer to type atomic64_t 100 * @v: pointer to type atomic64_t
101 * @n: value to assign 101 * @i: value to assign
102 * 102 *
103 * Atomically sets the value of @v to @n. 103 * Atomically sets the value of @v to @n.
104 */ 104 */
@@ -200,7 +200,7 @@ static inline long long atomic64_sub(long long i, atomic64_t *v)
200 * atomic64_sub_and_test - subtract value from variable and test result 200 * atomic64_sub_and_test - subtract value from variable and test result
201 * @i: integer value to subtract 201 * @i: integer value to subtract
202 * @v: pointer to type atomic64_t 202 * @v: pointer to type atomic64_t
203 * 203 *
204 * Atomically subtracts @i from @v and returns 204 * Atomically subtracts @i from @v and returns
205 * true if the result is zero, or false for all 205 * true if the result is zero, or false for all
206 * other cases. 206 * other cases.
@@ -224,9 +224,9 @@ static inline void atomic64_inc(atomic64_t *v)
224 224
225/** 225/**
226 * atomic64_dec - decrement atomic64 variable 226 * atomic64_dec - decrement atomic64 variable
227 * @ptr: pointer to type atomic64_t 227 * @v: pointer to type atomic64_t
228 * 228 *
229 * Atomically decrements @ptr by 1. 229 * Atomically decrements @v by 1.
230 */ 230 */
231static inline void atomic64_dec(atomic64_t *v) 231static inline void atomic64_dec(atomic64_t *v)
232{ 232{
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 5e1a2eef3e7c..b13fe63bdc59 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -19,7 +19,7 @@
19#ifdef CONFIG_X86_64 19#ifdef CONFIG_X86_64
20#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT 20#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
21#else 21#else
22#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER) 22#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
23#endif 23#endif
24#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) 24#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
25 25
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index 2f90c51cc49d..eb45aa6b1f27 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -112,7 +112,8 @@ struct boot_params {
112 __u8 e820_entries; /* 0x1e8 */ 112 __u8 e820_entries; /* 0x1e8 */
113 __u8 eddbuf_entries; /* 0x1e9 */ 113 __u8 eddbuf_entries; /* 0x1e9 */
114 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */ 114 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */
115 __u8 _pad6[6]; /* 0x1eb */ 115 __u8 kbd_status; /* 0x1eb */
116 __u8 _pad6[5]; /* 0x1ec */
116 struct setup_header hdr; /* setup header */ /* 0x1f1 */ 117 struct setup_header hdr; /* setup header */ /* 0x1f1 */
117 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; 118 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
118 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ 119 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index d6805798d6fc..fedf32b73e65 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -229,7 +229,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
229 sp = task_pt_regs(current)->sp; 229 sp = task_pt_regs(current)->sp;
230 } else { 230 } else {
231 /* -128 for the x32 ABI redzone */ 231 /* -128 for the x32 ABI redzone */
232 sp = percpu_read(old_rsp) - 128; 232 sp = this_cpu_read(old_rsp) - 128;
233 } 233 }
234 234
235 return (void __user *)round_down(sp - len, 16); 235 return (void __user *)round_down(sp - len, 16);
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 4d447b732d82..9476c04ee635 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
11 11
12static __always_inline struct task_struct *get_current(void) 12static __always_inline struct task_struct *get_current(void)
13{ 13{
14 return percpu_read_stable(current_task); 14 return this_cpu_read_stable(current_task);
15} 15}
16 16
17#define current get_current() 17#define current get_current()
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e95822d683f4..8bf1c06070d5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -6,6 +6,7 @@
6#include <asm/mmu.h> 6#include <asm/mmu.h>
7 7
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/percpu.h>
9 10
10static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) 11static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
11{ 12{
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 63a2a03d7d51..93e1c55f14ab 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -5,8 +5,8 @@ struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
6 void *acpi_handle; 6 void *acpi_handle;
7#endif 7#endif
8#ifdef CONFIG_X86_64 8#ifdef CONFIG_X86_DEV_DMA_OPS
9struct dma_map_ops *dma_ops; 9 struct dma_map_ops *dma_ops;
10#endif 10#endif
11#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) 11#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
12 void *iommu; /* hook for IOMMU specific extension */ 12 void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4b4331d71935..61c0bd25845a 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -30,7 +30,7 @@ extern struct dma_map_ops *dma_ops;
30 30
31static inline struct dma_map_ops *get_dma_ops(struct device *dev) 31static inline struct dma_map_ops *get_dma_ops(struct device *dev)
32{ 32{
33#ifdef CONFIG_X86_32 33#ifndef CONFIG_X86_DEV_DMA_OPS
34 return dma_ops; 34 return dma_ops;
35#else 35#else
36 if (unlikely(!dev) || !dev->archdata.dma_ops) 36 if (unlikely(!dev) || !dev->archdata.dma_ops)
@@ -62,6 +62,12 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
62 dma_addr_t *dma_addr, gfp_t flag, 62 dma_addr_t *dma_addr, gfp_t flag,
63 struct dma_attrs *attrs); 63 struct dma_attrs *attrs);
64 64
65#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
66extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
67extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
68extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
69#else
70
65static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 71static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
66{ 72{
67 if (!dev->dma_mask) 73 if (!dev->dma_mask)
@@ -79,6 +85,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
79{ 85{
80 return daddr; 86 return daddr;
81} 87}
88#endif /* CONFIG_X86_DMA_REMAP */
82 89
83static inline void 90static inline void
84dma_cache_sync(struct device *dev, void *vaddr, size_t size, 91dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa88154e4de..75f4c6d6a331 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -290,14 +290,14 @@ static inline int __thread_has_fpu(struct task_struct *tsk)
290static inline void __thread_clear_has_fpu(struct task_struct *tsk) 290static inline void __thread_clear_has_fpu(struct task_struct *tsk)
291{ 291{
292 tsk->thread.fpu.has_fpu = 0; 292 tsk->thread.fpu.has_fpu = 0;
293 percpu_write(fpu_owner_task, NULL); 293 this_cpu_write(fpu_owner_task, NULL);
294} 294}
295 295
296/* Must be paired with a 'clts' before! */ 296/* Must be paired with a 'clts' before! */
297static inline void __thread_set_has_fpu(struct task_struct *tsk) 297static inline void __thread_set_has_fpu(struct task_struct *tsk)
298{ 298{
299 tsk->thread.fpu.has_fpu = 1; 299 tsk->thread.fpu.has_fpu = 1;
300 percpu_write(fpu_owner_task, tsk); 300 this_cpu_write(fpu_owner_task, tsk);
301} 301}
302 302
303/* 303/*
@@ -344,7 +344,7 @@ typedef struct { int preload; } fpu_switch_t;
344 */ 344 */
345static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) 345static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
346{ 346{
347 return new == percpu_read_stable(fpu_owner_task) && 347 return new == this_cpu_read_stable(fpu_owner_task) &&
348 cpu == new->thread.fpu.last_cpu; 348 cpu == new->thread.fpu.last_cpu;
349} 349}
350 350
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 382f75d735f3..d3895dbf4ddb 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,14 +35,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
35 35
36#define __ARCH_IRQ_STAT 36#define __ARCH_IRQ_STAT
37 37
38#define inc_irq_stat(member) percpu_inc(irq_stat.member) 38#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
39 39
40#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) 40#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
41 41
42#define __ARCH_SET_SOFTIRQ_PENDING 42#define __ARCH_SET_SOFTIRQ_PENDING
43 43
44#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x)) 44#define set_softirq_pending(x) \
45#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x)) 45 this_cpu_write(irq_stat.__softirq_pending, (x))
46#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
46 47
47extern void ack_bad_irq(unsigned int irq); 48extern void ack_bad_irq(unsigned int irq);
48 49
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index ee52760549f0..b04cbdb138cd 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -144,6 +144,12 @@ typedef struct compat_siginfo {
144 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ 144 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
145 int _fd; 145 int _fd;
146 } _sigpoll; 146 } _sigpoll;
147
148 struct {
149 unsigned int _call_addr; /* calling insn */
150 int _syscall; /* triggering system call number */
151 unsigned int _arch; /* AUDIT_ARCH_* of syscall */
152 } _sigsys;
147 } _sifields; 153 } _sifields;
148} compat_siginfo_t; 154} compat_siginfo_t;
149 155
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 2c4943de5150..73d8c5398ea9 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -5,7 +5,7 @@
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
6#include <asm/apicdef.h> 6#include <asm/apicdef.h>
7#include <asm/irq_vectors.h> 7#include <asm/irq_vectors.h>
8 8#include <asm/x86_init.h>
9/* 9/*
10 * Intel IO-APIC support for SMP and UP systems. 10 * Intel IO-APIC support for SMP and UP systems.
11 * 11 *
@@ -21,15 +21,6 @@
21#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) 21#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
22#define IO_APIC_REDIR_MASKED (1 << 16) 22#define IO_APIC_REDIR_MASKED (1 << 16)
23 23
24struct io_apic_ops {
25 void (*init) (void);
26 unsigned int (*read) (unsigned int apic, unsigned int reg);
27 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
28 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
29};
30
31void __init set_io_apic_ops(const struct io_apic_ops *);
32
33/* 24/*
34 * The structure of the IO-APIC: 25 * The structure of the IO-APIC:
35 */ 26 */
@@ -156,7 +147,6 @@ struct io_apic_irq_attr;
156extern int io_apic_set_pci_routing(struct device *dev, int irq, 147extern int io_apic_set_pci_routing(struct device *dev, int irq,
157 struct io_apic_irq_attr *irq_attr); 148 struct io_apic_irq_attr *irq_attr);
158void setup_IO_APIC_irq_extra(u32 gsi); 149void setup_IO_APIC_irq_extra(u32 gsi);
159extern void ioapic_and_gsi_init(void);
160extern void ioapic_insert_resources(void); 150extern void ioapic_insert_resources(void);
161 151
162int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); 152int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
@@ -185,12 +175,29 @@ extern void mp_save_irq(struct mpc_intsrc *m);
185 175
186extern void disable_ioapic_support(void); 176extern void disable_ioapic_support(void);
187 177
178extern void __init native_io_apic_init_mappings(void);
179extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
180extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
181extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
182
183static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
184{
185 return x86_io_apic_ops.read(apic, reg);
186}
187
188static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
189{
190 x86_io_apic_ops.write(apic, reg, value);
191}
192static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
193{
194 x86_io_apic_ops.modify(apic, reg, value);
195}
188#else /* !CONFIG_X86_IO_APIC */ 196#else /* !CONFIG_X86_IO_APIC */
189 197
190#define io_apic_assign_pci_irqs 0 198#define io_apic_assign_pci_irqs 0
191#define setup_ioapic_ids_from_mpc x86_init_noop 199#define setup_ioapic_ids_from_mpc x86_init_noop
192static const int timer_through_8259 = 0; 200static const int timer_through_8259 = 0;
193static inline void ioapic_and_gsi_init(void) { }
194static inline void ioapic_insert_resources(void) { } 201static inline void ioapic_insert_resources(void) { }
195#define gsi_top (NR_IRQS_LEGACY) 202#define gsi_top (NR_IRQS_LEGACY)
196static inline int mp_find_ioapic(u32 gsi) { return 0; } 203static inline int mp_find_ioapic(u32 gsi) { return 0; }
@@ -212,6 +219,10 @@ static inline int restore_ioapic_entries(void)
212 219
213static inline void mp_save_irq(struct mpc_intsrc *m) { }; 220static inline void mp_save_irq(struct mpc_intsrc *m) { };
214static inline void disable_ioapic_support(void) { } 221static inline void disable_ioapic_support(void) { }
222#define native_io_apic_init_mappings NULL
223#define native_io_apic_read NULL
224#define native_io_apic_write NULL
225#define native_io_apic_modify NULL
215#endif 226#endif
216 227
217#endif /* _ASM_X86_IO_APIC_H */ 228#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 77843225b7ea..d82250b1debb 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
15 15
16static inline struct pt_regs *get_irq_regs(void) 16static inline struct pt_regs *get_irq_regs(void)
17{ 17{
18 return percpu_read(irq_regs); 18 return this_cpu_read(irq_regs);
19} 19}
20 20
21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) 21static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
23 struct pt_regs *old_regs; 23 struct pt_regs *old_regs;
24 24
25 old_regs = get_irq_regs(); 25 old_regs = get_irq_regs();
26 percpu_write(irq_regs, new_regs); 26 this_cpu_write(irq_regs, new_regs);
27 27
28 return old_regs; 28 return old_regs;
29} 29}
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 47d99934580f..5fb9bbbd2f14 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,45 +1,101 @@
1#ifndef _ASM_X86_IRQ_REMAPPING_H 1/*
2#define _ASM_X86_IRQ_REMAPPING_H 2 * Copyright (C) 2012 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * This header file contains the interface of the interrupt remapping code to
19 * the x86 interrupt management code.
20 */
3 21
4#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) 22#ifndef __X86_IRQ_REMAPPING_H
23#define __X86_IRQ_REMAPPING_H
24
25#include <asm/io_apic.h>
5 26
6#ifdef CONFIG_IRQ_REMAP 27#ifdef CONFIG_IRQ_REMAP
7static void irq_remap_modify_chip_defaults(struct irq_chip *chip); 28
8static inline void prepare_irte(struct irte *irte, int vector, 29extern int irq_remapping_enabled;
9 unsigned int dest) 30
31extern void setup_irq_remapping_ops(void);
32extern int irq_remapping_supported(void);
33extern int irq_remapping_prepare(void);
34extern int irq_remapping_enable(void);
35extern void irq_remapping_disable(void);
36extern int irq_remapping_reenable(int);
37extern int irq_remap_enable_fault_handling(void);
38extern int setup_ioapic_remapped_entry(int irq,
39 struct IO_APIC_route_entry *entry,
40 unsigned int destination,
41 int vector,
42 struct io_apic_irq_attr *attr);
43extern int set_remapped_irq_affinity(struct irq_data *data,
44 const struct cpumask *mask,
45 bool force);
46extern void free_remapped_irq(int irq);
47extern void compose_remapped_msi_msg(struct pci_dev *pdev,
48 unsigned int irq, unsigned int dest,
49 struct msi_msg *msg, u8 hpet_id);
50extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
51extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
52 int index, int sub_handle);
53extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
54
55#else /* CONFIG_IRQ_REMAP */
56
57#define irq_remapping_enabled 0
58
59static inline void setup_irq_remapping_ops(void) { }
60static inline int irq_remapping_supported(void) { return 0; }
61static inline int irq_remapping_prepare(void) { return -ENODEV; }
62static inline int irq_remapping_enable(void) { return -ENODEV; }
63static inline void irq_remapping_disable(void) { }
64static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
65static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
66static inline int setup_ioapic_remapped_entry(int irq,
67 struct IO_APIC_route_entry *entry,
68 unsigned int destination,
69 int vector,
70 struct io_apic_irq_attr *attr)
71{
72 return -ENODEV;
73}
74static inline int set_remapped_irq_affinity(struct irq_data *data,
75 const struct cpumask *mask,
76 bool force)
10{ 77{
11 memset(irte, 0, sizeof(*irte)); 78 return 0;
12
13 irte->present = 1;
14 irte->dst_mode = apic->irq_dest_mode;
15 /*
16 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
17 * actual level or edge trigger will be setup in the IO-APIC
18 * RTE. This will help simplify level triggered irq migration.
19 * For more details, see the comments (in io_apic.c) explainig IO-APIC
20 * irq migration in the presence of interrupt-remapping.
21 */
22 irte->trigger_mode = 0;
23 irte->dlvry_mode = apic->irq_delivery_mode;
24 irte->vector = vector;
25 irte->dest_id = IRTE_DEST(dest);
26 irte->redir_hint = 1;
27} 79}
28static inline bool irq_remapped(struct irq_cfg *cfg) 80static inline void free_remapped_irq(int irq) { }
81static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
82 unsigned int irq, unsigned int dest,
83 struct msi_msg *msg, u8 hpet_id)
29{ 84{
30 return cfg->irq_2_iommu.iommu != NULL;
31} 85}
32#else 86static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
33static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
34{ 87{
88 return -ENODEV;
35} 89}
36static inline bool irq_remapped(struct irq_cfg *cfg) 90static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
91 int index, int sub_handle)
37{ 92{
38 return false; 93 return -ENODEV;
39} 94}
40static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) 95static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41{ 96{
97 return -ENODEV;
42} 98}
43#endif 99#endif /* CONFIG_IRQ_REMAP */
44 100
45#endif /* _ASM_X86_IRQ_REMAPPING_H */ 101#endif /* __X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/kbdleds.h b/arch/x86/include/asm/kbdleds.h
new file mode 100644
index 000000000000..f27ac5ff597d
--- /dev/null
+++ b/arch/x86/include/asm/kbdleds.h
@@ -0,0 +1,17 @@
1#ifndef _ASM_X86_KBDLEDS_H
2#define _ASM_X86_KBDLEDS_H
3
4/*
5 * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
6 * This seems a good reason to start with NumLock off. That's why on X86 we
7 * ask the bios for the correct state.
8 */
9
10#include <asm/setup.h>
11
12static inline int kbd_defleds(void)
13{
14 return boot_params.kbd_status & 0x20 ? (1 << VC_NUMLOCK) : 0;
15}
16
17#endif /* _ASM_X86_KBDLEDS_H */
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index d73f1571bde7..2c37aadcbc35 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -24,7 +24,6 @@ enum die_val {
24extern void printk_address(unsigned long address, int reliable); 24extern void printk_address(unsigned long address, int reliable);
25extern void die(const char *, struct pt_regs *,long); 25extern void die(const char *, struct pt_regs *,long);
26extern int __must_check __die(const char *, struct pt_regs *, long); 26extern int __must_check __die(const char *, struct pt_regs *, long);
27extern void show_registers(struct pt_regs *regs);
28extern void show_trace(struct task_struct *t, struct pt_regs *regs, 27extern void show_trace(struct task_struct *t, struct pt_regs *regs,
29 unsigned long *sp, unsigned long bp); 28 unsigned long *sp, unsigned long bp);
30extern void __show_regs(struct pt_regs *regs, int all); 29extern void __show_regs(struct pt_regs *regs, int all);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e216ba066e79..e5b97be12d2a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
27#include <asm/desc.h> 27#include <asm/desc.h>
28#include <asm/mtrr.h> 28#include <asm/mtrr.h>
29#include <asm/msr-index.h> 29#include <asm/msr-index.h>
30#include <asm/asm.h>
30 31
31#define KVM_MAX_VCPUS 254 32#define KVM_MAX_VCPUS 254
32#define KVM_SOFT_MAX_VCPUS 160 33#define KVM_SOFT_MAX_VCPUS 160
@@ -921,9 +922,7 @@ extern bool kvm_rebooting;
921 __ASM_SIZE(push) " $666b \n\t" \ 922 __ASM_SIZE(push) " $666b \n\t" \
922 "call kvm_spurious_fault \n\t" \ 923 "call kvm_spurious_fault \n\t" \
923 ".popsection \n\t" \ 924 ".popsection \n\t" \
924 ".pushsection __ex_table, \"a\" \n\t" \ 925 _ASM_EXTABLE(666b, 667b)
925 _ASM_PTR " 666b, 667b \n\t" \
926 ".popsection"
927 926
928#define __kvm_handle_fault_on_reboot(insn) \ 927#define __kvm_handle_fault_on_reboot(insn) \
929 ____kvm_handle_fault_on_reboot(insn, "") 928 ____kvm_handle_fault_on_reboot(insn, "")
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfac..183922e13de1 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
170 unsigned int eax, ebx, ecx, edx; 170 unsigned int eax, ebx, ecx, edx;
171 char signature[13]; 171 char signature[13];
172 172
173 if (boot_cpu_data.cpuid_level < 0)
174 return 0; /* So we don't blow up on old processors */
175
173 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 176 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
174 memcpy(signature + 0, &ebx, 4); 177 memcpy(signature + 0, &ebx, 4);
175 memcpy(signature + 4, &ecx, 4); 178 memcpy(signature + 4, &ecx, 4);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 69021528b43c..cdbf36776106 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 25static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
26{ 26{
27#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
28 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 28 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
29 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 29 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
30#endif 30#endif
31} 31}
32 32
@@ -37,8 +37,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
37 37
38 if (likely(prev != next)) { 38 if (likely(prev != next)) {
39#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
40 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 40 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
41 percpu_write(cpu_tlbstate.active_mm, next); 41 this_cpu_write(cpu_tlbstate.active_mm, next);
42#endif 42#endif
43 cpumask_set_cpu(cpu, mm_cpumask(next)); 43 cpumask_set_cpu(cpu, mm_cpumask(next));
44 44
@@ -56,8 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
56 } 56 }
57#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
58 else { 58 else {
59 percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 59 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
60 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 60 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
61 61
62 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { 62 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
63 /* We were in lazy tlb mode and leave_mm disabled 63 /* We were in lazy tlb mode and leave_mm disabled
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 55728e121473..eb05fb3b02fb 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -61,10 +61,4 @@ static inline int pfn_valid(int pfn)
61 61
62#endif /* CONFIG_DISCONTIGMEM */ 62#endif /* CONFIG_DISCONTIGMEM */
63 63
64#ifdef CONFIG_NEED_MULTIPLE_NODES
65/* always use node 0 for bootmem on this numa platform */
66#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
67 (NODE_DATA(0)->bdata)
68#endif /* CONFIG_NEED_MULTIPLE_NODES */
69
70#endif /* _ASM_X86_MMZONE_32_H */ 64#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 95203d40ffdd..084ef95274cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,14 +169,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
169 return native_write_msr_safe(msr, low, high); 169 return native_write_msr_safe(msr, low, high);
170} 170}
171 171
172/* 172/* rdmsr with exception handling */
173 * rdmsr with exception handling.
174 *
175 * Please note that the exception handling works only after we've
176 * switched to the "smart" #GP handler in trap_init() which knows about
177 * exception tables - using this macro earlier than that causes machine
178 * hangs on boxes which do not implement the @msr in the first argument.
179 */
180#define rdmsr_safe(msr, p1, p2) \ 173#define rdmsr_safe(msr, p1, p2) \
181({ \ 174({ \
182 int __err; \ 175 int __err; \
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index fd3f9f18cf3f..0e3793b821ef 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -27,6 +27,8 @@ void arch_trigger_all_cpu_backtrace(void);
27enum { 27enum {
28 NMI_LOCAL=0, 28 NMI_LOCAL=0,
29 NMI_UNKNOWN, 29 NMI_UNKNOWN,
30 NMI_SERR,
31 NMI_IO_CHECK,
30 NMI_MAX 32 NMI_MAX
31}; 33};
32 34
@@ -35,8 +37,24 @@ enum {
35 37
36typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); 38typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
37 39
38int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long, 40struct nmiaction {
39 const char *); 41 struct list_head list;
42 nmi_handler_t handler;
43 unsigned long flags;
44 const char *name;
45};
46
47#define register_nmi_handler(t, fn, fg, n) \
48({ \
49 static struct nmiaction fn##_na = { \
50 .handler = (fn), \
51 .name = (n), \
52 .flags = (fg), \
53 }; \
54 __register_nmi_handler((t), &fn##_na); \
55})
56
57int __register_nmi_handler(unsigned int, struct nmiaction *);
40 58
41void unregister_nmi_handler(unsigned int, const char *); 59void unregister_nmi_handler(unsigned int, const char *);
42 60
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 405b4032a60b..aff2b3356101 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -87,7 +87,11 @@
87#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 87#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
88#define P6_NOP5_ATOMIC P6_NOP5 88#define P6_NOP5_ATOMIC P6_NOP5
89 89
90#ifdef __ASSEMBLY__
91#define _ASM_MK_NOP(x) .byte x
92#else
90#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" 93#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
94#endif
91 95
92#if defined(CONFIG_MK7) 96#if defined(CONFIG_MK7)
93#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) 97#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ade619ff9e2a..ef17af013475 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,8 +15,8 @@
15 */ 15 */
16#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 16#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
17 17
18#define THREAD_ORDER 1 18#define THREAD_SIZE_ORDER 1
19#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 19#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
20 20
21#define STACKFAULT_STACK 0 21#define STACKFAULT_STACK 0
22#define DOUBLEFAULT_STACK 1 22#define DOUBLEFAULT_STACK 1
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 7639dbf5d223..320f7bb95f76 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_PAGE_64_DEFS_H 1#ifndef _ASM_X86_PAGE_64_DEFS_H
2#define _ASM_X86_PAGE_64_DEFS_H 2#define _ASM_X86_PAGE_64_DEFS_H
3 3
4#define THREAD_ORDER 1 4#define THREAD_SIZE_ORDER 1
5#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 5#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
6#define CURRENT_MASK (~(THREAD_SIZE - 1)) 6#define CURRENT_MASK (~(THREAD_SIZE - 1))
7 7
8#define EXCEPTION_STACK_ORDER 0 8#define EXCEPTION_STACK_ORDER 0
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index aa0f91308367..6cbbabf52707 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1023,10 +1023,8 @@ extern void default_banner(void);
1023 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ 1023 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1024 ) 1024 )
1025 1025
1026#define GET_CR2_INTO_RCX \ 1026#define GET_CR2_INTO_RAX \
1027 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \ 1027 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
1028 movq %rax, %rcx; \
1029 xorq %rax, %rax;
1030 1028
1031#define PARAVIRT_ADJUST_EXCEPTION_FRAME \ 1029#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1032 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ 1030 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7a11910a63c4..d9b8e3f7f42a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -46,7 +46,7 @@
46 46
47#ifdef CONFIG_SMP 47#ifdef CONFIG_SMP
48#define __percpu_prefix "%%"__stringify(__percpu_seg)":" 48#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
49#define __my_cpu_offset percpu_read(this_cpu_off) 49#define __my_cpu_offset this_cpu_read(this_cpu_off)
50 50
51/* 51/*
52 * Compared to the generic __my_cpu_offset version, the following 52 * Compared to the generic __my_cpu_offset version, the following
@@ -351,23 +351,15 @@ do { \
351}) 351})
352 352
353/* 353/*
354 * percpu_read() makes gcc load the percpu variable every time it is 354 * this_cpu_read() makes gcc load the percpu variable every time it is
355 * accessed while percpu_read_stable() allows the value to be cached. 355 * accessed while this_cpu_read_stable() allows the value to be cached.
356 * percpu_read_stable() is more efficient and can be used if its value 356 * this_cpu_read_stable() is more efficient and can be used if its value
357 * is guaranteed to be valid across cpus. The current users include 357 * is guaranteed to be valid across cpus. The current users include
358 * get_current() and get_thread_info() both of which are actually 358 * get_current() and get_thread_info() both of which are actually
359 * per-thread variables implemented as per-cpu variables and thus 359 * per-thread variables implemented as per-cpu variables and thus
360 * stable for the duration of the respective task. 360 * stable for the duration of the respective task.
361 */ 361 */
362#define percpu_read(var) percpu_from_op("mov", var, "m" (var)) 362#define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
363#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
364#define percpu_write(var, val) percpu_to_op("mov", var, val)
365#define percpu_add(var, val) percpu_add_op(var, val)
366#define percpu_sub(var, val) percpu_add_op(var, -(val))
367#define percpu_and(var, val) percpu_to_op("and", var, val)
368#define percpu_or(var, val) percpu_to_op("or", var, val)
369#define percpu_xor(var, val) percpu_to_op("xor", var, val)
370#define percpu_inc(var) percpu_unary_op("inc", var)
371 363
372#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 364#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
373#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 365#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -512,7 +504,11 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
512{ 504{
513 unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; 505 unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
514 506
515 return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0; 507#ifdef CONFIG_X86_64
508 return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0;
509#else
510 return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0;
511#endif
516} 512}
517 513
518static inline int x86_this_cpu_variable_test_bit(int nr, 514static inline int x86_this_cpu_variable_test_bit(int nr,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4fa7dcceb6c0..7745b257f035 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -579,9 +579,6 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
579/* Free all resources held by a thread. */ 579/* Free all resources held by a thread. */
580extern void release_thread(struct task_struct *); 580extern void release_thread(struct task_struct *);
581 581
582/* Prepare to copy thread state - unlazy all lazy state */
583extern void prepare_to_copy(struct task_struct *tsk);
584
585unsigned long get_wchan(struct task_struct *p); 582unsigned long get_wchan(struct task_struct *p);
586 583
587/* 584/*
@@ -974,8 +971,6 @@ extern bool cpu_has_amd_erratum(const int *);
974#define cpu_has_amd_erratum(x) (false) 971#define cpu_has_amd_erratum(x) (false)
975#endif /* CONFIG_CPU_SUP_AMD */ 972#endif /* CONFIG_CPU_SUP_AMD */
976 973
977void cpu_idle_wait(void);
978
979extern unsigned long arch_align_stack(unsigned long sp); 974extern unsigned long arch_align_stack(unsigned long sp);
980extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 975extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
981 976
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 165466233ab0..c48a95035a77 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -205,13 +205,15 @@
205 205
206#define IDT_ENTRIES 256 206#define IDT_ENTRIES 256
207#define NUM_EXCEPTION_VECTORS 32 207#define NUM_EXCEPTION_VECTORS 32
208/* Bitmask of exception vectors which push an error code on the stack */
209#define EXCEPTION_ERRCODE_MASK 0x00027d00
208#define GDT_SIZE (GDT_ENTRIES * 8) 210#define GDT_SIZE (GDT_ENTRIES * 8)
209#define GDT_ENTRY_TLS_ENTRIES 3 211#define GDT_ENTRY_TLS_ENTRIES 3
210#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) 212#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
211 213
212#ifdef __KERNEL__ 214#ifdef __KERNEL__
213#ifndef __ASSEMBLY__ 215#ifndef __ASSEMBLY__
214extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; 216extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
215 217
216/* 218/*
217 * Load a segment. Fall back on loading the zero 219 * Load a segment. Fall back on loading the zero
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0434c400287c..f48394513c37 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -62,6 +62,8 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
62/* Static state in head.S used to set up a CPU */ 62/* Static state in head.S used to set up a CPU */
63extern unsigned long stack_start; /* Initial stack pointer address */ 63extern unsigned long stack_start; /* Initial stack pointer address */
64 64
65struct task_struct;
66
65struct smp_ops { 67struct smp_ops {
66 void (*smp_prepare_boot_cpu)(void); 68 void (*smp_prepare_boot_cpu)(void);
67 void (*smp_prepare_cpus)(unsigned max_cpus); 69 void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -70,7 +72,7 @@ struct smp_ops {
70 void (*stop_other_cpus)(int wait); 72 void (*stop_other_cpus)(int wait);
71 void (*smp_send_reschedule)(int cpu); 73 void (*smp_send_reschedule)(int cpu);
72 74
73 int (*cpu_up)(unsigned cpu); 75 int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
74 int (*cpu_disable)(void); 76 int (*cpu_disable)(void);
75 void (*cpu_die)(unsigned int cpu); 77 void (*cpu_die)(unsigned int cpu);
76 void (*play_dead)(void); 78 void (*play_dead)(void);
@@ -113,9 +115,9 @@ static inline void smp_cpus_done(unsigned int max_cpus)
113 smp_ops.smp_cpus_done(max_cpus); 115 smp_ops.smp_cpus_done(max_cpus);
114} 116}
115 117
116static inline int __cpu_up(unsigned int cpu) 118static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
117{ 119{
118 return smp_ops.cpu_up(cpu); 120 return smp_ops.cpu_up(cpu, tidle);
119} 121}
120 122
121static inline int __cpu_disable(void) 123static inline int __cpu_disable(void)
@@ -152,7 +154,7 @@ void cpu_disable_common(void);
152void native_smp_prepare_boot_cpu(void); 154void native_smp_prepare_boot_cpu(void);
153void native_smp_prepare_cpus(unsigned int max_cpus); 155void native_smp_prepare_cpus(unsigned int max_cpus);
154void native_smp_cpus_done(unsigned int max_cpus); 156void native_smp_cpus_done(unsigned int max_cpus);
155int native_cpu_up(unsigned int cpunum); 157int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
156int native_cpu_disable(void); 158int native_cpu_disable(void);
157void native_cpu_die(unsigned int cpu); 159void native_cpu_die(unsigned int cpu);
158void native_play_dead(void); 160void native_play_dead(void);
@@ -162,6 +164,7 @@ int wbinvd_on_all_cpus(void);
162 164
163void native_send_call_func_ipi(const struct cpumask *mask); 165void native_send_call_func_ipi(const struct cpumask *mask);
164void native_send_call_func_single_ipi(int cpu); 166void native_send_call_func_single_ipi(int cpu);
167void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
165 168
166void smp_store_cpu_info(int id); 169void smp_store_cpu_info(int id);
167#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 170#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@@ -188,11 +191,11 @@ extern unsigned disabled_cpus __cpuinitdata;
188 * from the initial startup. We map APIC_BASE very early in page_setup(), 191 * from the initial startup. We map APIC_BASE very early in page_setup(),
189 * so this is correct in the x86 case. 192 * so this is correct in the x86 case.
190 */ 193 */
191#define raw_smp_processor_id() (percpu_read(cpu_number)) 194#define raw_smp_processor_id() (this_cpu_read(cpu_number))
192extern int safe_smp_processor_id(void); 195extern int safe_smp_processor_id(void);
193 196
194#elif defined(CONFIG_X86_64_SMP) 197#elif defined(CONFIG_X86_64_SMP)
195#define raw_smp_processor_id() (percpu_read(cpu_number)) 198#define raw_smp_processor_id() (this_cpu_read(cpu_number))
196 199
197#define stack_smp_processor_id() \ 200#define stack_smp_processor_id() \
198({ \ 201({ \
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 76bfa2cf301d..b315a33867f2 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -20,10 +20,8 @@
20 20
21#ifdef CONFIG_X86_32 21#ifdef CONFIG_X86_32
22# define LOCK_PTR_REG "a" 22# define LOCK_PTR_REG "a"
23# define REG_PTR_MODE "k"
24#else 23#else
25# define LOCK_PTR_REG "D" 24# define LOCK_PTR_REG "D"
26# define REG_PTR_MODE "q"
27#endif 25#endif
28 26
29#if defined(CONFIG_X86_32) && \ 27#if defined(CONFIG_X86_32) && \
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b5d9533d2c38..6a998598f172 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -75,9 +75,9 @@ static __always_inline void boot_init_stack_canary(void)
75 75
76 current->stack_canary = canary; 76 current->stack_canary = canary;
77#ifdef CONFIG_X86_64 77#ifdef CONFIG_X86_64
78 percpu_write(irq_stack_union.stack_canary, canary); 78 this_cpu_write(irq_stack_union.stack_canary, canary);
79#else 79#else
80 percpu_write(stack_canary.canary, canary); 80 this_cpu_write(stack_canary.canary, canary);
81#endif 81#endif
82} 82}
83 83
diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/asm/stat.h
index e0b1d9bbcbc6..7b3ddc348585 100644
--- a/arch/x86/include/asm/stat.h
+++ b/arch/x86/include/asm/stat.h
@@ -25,6 +25,12 @@ struct stat {
25 unsigned long __unused5; 25 unsigned long __unused5;
26}; 26};
27 27
28/* We don't need to memset the whole thing just to initialize the padding */
29#define INIT_STRUCT_STAT_PADDING(st) do { \
30 st.__unused4 = 0; \
31 st.__unused5 = 0; \
32} while (0)
33
28#define STAT64_HAS_BROKEN_ST_INO 1 34#define STAT64_HAS_BROKEN_ST_INO 1
29 35
30/* This matches struct stat64 in glibc2.1, hence the absolutely 36/* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -63,6 +69,12 @@ struct stat64 {
63 unsigned long long st_ino; 69 unsigned long long st_ino;
64}; 70};
65 71
72/* We don't need to memset the whole thing just to initialize the padding */
73#define INIT_STRUCT_STAT64_PADDING(st) do { \
74 memset(&st.__pad0, 0, sizeof(st.__pad0)); \
75 memset(&st.__pad3, 0, sizeof(st.__pad3)); \
76} while (0)
77
66#else /* __i386__ */ 78#else /* __i386__ */
67 79
68struct stat { 80struct stat {
@@ -87,6 +99,15 @@ struct stat {
87 unsigned long st_ctime_nsec; 99 unsigned long st_ctime_nsec;
88 long __unused[3]; 100 long __unused[3];
89}; 101};
102
103/* We don't need to memset the whole thing just to initialize the padding */
104#define INIT_STRUCT_STAT_PADDING(st) do { \
105 st.__pad0 = 0; \
106 st.__unused[0] = 0; \
107 st.__unused[1] = 0; \
108 st.__unused[2] = 0; \
109} while (0)
110
90#endif 111#endif
91 112
92/* for 32bit emulation and 32 bit kernels */ 113/* for 32bit emulation and 32 bit kernels */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 386b78686c4d..1ace47b62592 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,9 +13,11 @@
13#ifndef _ASM_X86_SYSCALL_H 13#ifndef _ASM_X86_SYSCALL_H
14#define _ASM_X86_SYSCALL_H 14#define _ASM_X86_SYSCALL_H
15 15
16#include <linux/audit.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <asm/asm-offsets.h> /* For NR_syscalls */ 19#include <asm/asm-offsets.h> /* For NR_syscalls */
20#include <asm/thread_info.h> /* for TS_COMPAT */
19#include <asm/unistd.h> 21#include <asm/unistd.h>
20 22
21extern const unsigned long sys_call_table[]; 23extern const unsigned long sys_call_table[];
@@ -88,6 +90,12 @@ static inline void syscall_set_arguments(struct task_struct *task,
88 memcpy(&regs->bx + i, args, n * sizeof(args[0])); 90 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
89} 91}
90 92
93static inline int syscall_get_arch(struct task_struct *task,
94 struct pt_regs *regs)
95{
96 return AUDIT_ARCH_I386;
97}
98
91#else /* CONFIG_X86_64 */ 99#else /* CONFIG_X86_64 */
92 100
93static inline void syscall_get_arguments(struct task_struct *task, 101static inline void syscall_get_arguments(struct task_struct *task,
@@ -212,6 +220,25 @@ static inline void syscall_set_arguments(struct task_struct *task,
212 } 220 }
213} 221}
214 222
223static inline int syscall_get_arch(struct task_struct *task,
224 struct pt_regs *regs)
225{
226#ifdef CONFIG_IA32_EMULATION
227 /*
228 * TS_COMPAT is set for 32-bit syscall entry and then
229 * remains set until we return to user mode.
230 *
231 * TIF_IA32 tasks should always have TS_COMPAT set at
232 * system call time.
233 *
234 * x32 tasks should be considered AUDIT_ARCH_X86_64.
235 */
236 if (task_thread_info(task)->status & TS_COMPAT)
237 return AUDIT_ARCH_I386;
238#endif
239 /* Both x32 and x86_64 are considered "64-bit". */
240 return AUDIT_ARCH_X86_64;
241}
215#endif /* CONFIG_X86_32 */ 242#endif /* CONFIG_X86_32 */
216 243
217#endif /* _ASM_X86_SYSCALL_H */ 244#endif /* _ASM_X86_SYSCALL_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6df8ccd715..3c9aebc00d39 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -155,24 +155,6 @@ struct thread_info {
155 155
156#define PREEMPT_ACTIVE 0x10000000 156#define PREEMPT_ACTIVE 0x10000000
157 157
158/* thread information allocation */
159#ifdef CONFIG_DEBUG_STACK_USAGE
160#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
161#else
162#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
163#endif
164
165#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
166
167#define alloc_thread_info_node(tsk, node) \
168({ \
169 struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
170 THREAD_ORDER); \
171 struct thread_info *ret = page ? page_address(page) : NULL; \
172 \
173 ret; \
174})
175
176#ifdef CONFIG_X86_32 158#ifdef CONFIG_X86_32
177 159
178#define STACK_WARN (THREAD_SIZE/8) 160#define STACK_WARN (THREAD_SIZE/8)
@@ -222,7 +204,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
222static inline struct thread_info *current_thread_info(void) 204static inline struct thread_info *current_thread_info(void)
223{ 205{
224 struct thread_info *ti; 206 struct thread_info *ti;
225 ti = (void *)(percpu_read_stable(kernel_stack) + 207 ti = (void *)(this_cpu_read_stable(kernel_stack) +
226 KERNEL_STACK_OFFSET - THREAD_SIZE); 208 KERNEL_STACK_OFFSET - THREAD_SIZE);
227 return ti; 209 return ti;
228} 210}
@@ -282,8 +264,7 @@ static inline bool is_ia32_task(void)
282 264
283#ifndef __ASSEMBLY__ 265#ifndef __ASSEMBLY__
284extern void arch_task_cache_init(void); 266extern void arch_task_cache_init(void);
285extern void free_thread_info(struct thread_info *ti);
286extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 267extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
287#define arch_task_cache_init arch_task_cache_init 268extern void arch_release_task_struct(struct task_struct *tsk);
288#endif 269#endif
289#endif /* _ASM_X86_THREAD_INFO_H */ 270#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c0e108e08079..36a1a2ab87d2 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,11 +62,7 @@ static inline void __flush_tlb_one(unsigned long addr)
62 __flush_tlb(); 62 __flush_tlb();
63} 63}
64 64
65#ifdef CONFIG_X86_32 65#define TLB_FLUSH_ALL -1UL
66# define TLB_FLUSH_ALL 0xffffffff
67#else
68# define TLB_FLUSH_ALL -1ULL
69#endif
70 66
71/* 67/*
72 * TLB flushing: 68 * TLB flushing:
@@ -156,8 +152,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
156 152
157static inline void reset_lazy_tlbstate(void) 153static inline void reset_lazy_tlbstate(void)
158{ 154{
159 percpu_write(cpu_tlbstate.state, 0); 155 this_cpu_write(cpu_tlbstate.state, 0);
160 percpu_write(cpu_tlbstate.active_mm, &init_mm); 156 this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
161} 157}
162 158
163#endif /* SMP */ 159#endif /* SMP */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index b9676ae37ada..095b21507b6a 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -92,44 +92,6 @@ extern void setup_node_to_cpumask_map(void);
92 92
93#define pcibus_to_node(bus) __pcibus_to_node(bus) 93#define pcibus_to_node(bus) __pcibus_to_node(bus)
94 94
95#ifdef CONFIG_X86_32
96# define SD_CACHE_NICE_TRIES 1
97# define SD_IDLE_IDX 1
98#else
99# define SD_CACHE_NICE_TRIES 2
100# define SD_IDLE_IDX 2
101#endif
102
103/* sched_domains SD_NODE_INIT for NUMA machines */
104#define SD_NODE_INIT (struct sched_domain) { \
105 .min_interval = 8, \
106 .max_interval = 32, \
107 .busy_factor = 32, \
108 .imbalance_pct = 125, \
109 .cache_nice_tries = SD_CACHE_NICE_TRIES, \
110 .busy_idx = 3, \
111 .idle_idx = SD_IDLE_IDX, \
112 .newidle_idx = 0, \
113 .wake_idx = 0, \
114 .forkexec_idx = 0, \
115 \
116 .flags = 1*SD_LOAD_BALANCE \
117 | 1*SD_BALANCE_NEWIDLE \
118 | 1*SD_BALANCE_EXEC \
119 | 1*SD_BALANCE_FORK \
120 | 0*SD_BALANCE_WAKE \
121 | 1*SD_WAKE_AFFINE \
122 | 0*SD_PREFER_LOCAL \
123 | 0*SD_SHARE_CPUPOWER \
124 | 0*SD_POWERSAVINGS_BALANCE \
125 | 0*SD_SHARE_PKG_RESOURCES \
126 | 1*SD_SERIALIZE \
127 | 0*SD_PREFER_SIBLING \
128 , \
129 .last_balance = jiffies, \
130 .balance_interval = 1, \
131}
132
133extern int __node_distance(int, int); 95extern int __node_distance(int, int);
134#define node_distance(a, b) __node_distance(a, b) 96#define node_distance(a, b) __node_distance(a, b)
135 97
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e0544597cfe7..851fe0dc13bc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -79,11 +79,12 @@
79#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) 79#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
80 80
81/* 81/*
82 * The exception table consists of pairs of addresses: the first is the 82 * The exception table consists of pairs of addresses relative to the
83 * address of an instruction that is allowed to fault, and the second is 83 * exception table entry itself: the first is the address of an
84 * the address at which the program should continue. No registers are 84 * instruction that is allowed to fault, and the second is the address
85 * modified, so it is entirely up to the continuation code to figure out 85 * at which the program should continue. No registers are modified,
86 * what to do. 86 * so it is entirely up to the continuation code to figure out what to
87 * do.
87 * 88 *
88 * All the routines below use bits of fixup code that are out of line 89 * All the routines below use bits of fixup code that are out of line
89 * with the main instruction path. This means when everything is well, 90 * with the main instruction path. This means when everything is well,
@@ -92,10 +93,14 @@
92 */ 93 */
93 94
94struct exception_table_entry { 95struct exception_table_entry {
95 unsigned long insn, fixup; 96 int insn, fixup;
96}; 97};
98/* This is not the generic standard exception_table_entry format */
99#define ARCH_HAS_SORT_EXTABLE
100#define ARCH_HAS_SEARCH_EXTABLE
97 101
98extern int fixup_exception(struct pt_regs *regs); 102extern int fixup_exception(struct pt_regs *regs);
103extern int early_fixup_exception(unsigned long *ip);
99 104
100/* 105/*
101 * These are the main single-value transfer routines. They automatically 106 * These are the main single-value transfer routines. They automatically
@@ -202,8 +207,8 @@ extern int __get_user_bad(void);
202 asm volatile("1: movl %%eax,0(%1)\n" \ 207 asm volatile("1: movl %%eax,0(%1)\n" \
203 "2: movl %%edx,4(%1)\n" \ 208 "2: movl %%edx,4(%1)\n" \
204 "3:\n" \ 209 "3:\n" \
205 _ASM_EXTABLE(1b, 2b - 1b) \ 210 _ASM_EXTABLE_EX(1b, 2b) \
206 _ASM_EXTABLE(2b, 3b - 2b) \ 211 _ASM_EXTABLE_EX(2b, 3b) \
207 : : "A" (x), "r" (addr)) 212 : : "A" (x), "r" (addr))
208 213
209#define __put_user_x8(x, ptr, __ret_pu) \ 214#define __put_user_x8(x, ptr, __ret_pu) \
@@ -408,7 +413,7 @@ do { \
408#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ 413#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
409 asm volatile("1: mov"itype" %1,%"rtype"0\n" \ 414 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
410 "2:\n" \ 415 "2:\n" \
411 _ASM_EXTABLE(1b, 2b - 1b) \ 416 _ASM_EXTABLE_EX(1b, 2b) \
412 : ltype(x) : "m" (__m(addr))) 417 : ltype(x) : "m" (__m(addr)))
413 418
414#define __put_user_nocheck(x, ptr, size) \ 419#define __put_user_nocheck(x, ptr, size) \
@@ -450,7 +455,7 @@ struct __large_struct { unsigned long buf[100]; };
450#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ 455#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
451 asm volatile("1: mov"itype" %"rtype"0,%1\n" \ 456 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
452 "2:\n" \ 457 "2:\n" \
453 _ASM_EXTABLE(1b, 2b - 1b) \ 458 _ASM_EXTABLE_EX(1b, 2b) \
454 : : ltype(x), "m" (__m(addr))) 459 : : ltype(x), "m" (__m(addr)))
455 460
456/* 461/*
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 6fe6767b7124..e58f03b206c3 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
43 return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); 43 return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
44} 44}
45 45
46/*
47 * Load an unaligned word from kernel space.
48 *
49 * In the (very unlikely) case of the word being a page-crosser
50 * and the next page not being mapped, take the exception and
51 * return zeroes in the non-existing part.
52 */
53static inline unsigned long load_unaligned_zeropad(const void *addr)
54{
55 unsigned long ret, dummy;
56
57 asm(
58 "1:\tmov %2,%0\n"
59 "2:\n"
60 ".section .fixup,\"ax\"\n"
61 "3:\t"
62 "lea %2,%1\n\t"
63 "and %3,%1\n\t"
64 "mov (%1),%0\n\t"
65 "leal %2,%%ecx\n\t"
66 "andl %4,%%ecx\n\t"
67 "shll $3,%%ecx\n\t"
68 "shr %%cl,%0\n\t"
69 "jmp 2b\n"
70 ".previous\n"
71 _ASM_EXTABLE(1b, 3b)
72 :"=&r" (ret),"=&c" (dummy)
73 :"m" (*(unsigned long *)addr),
74 "i" (-sizeof(unsigned long)),
75 "i" (sizeof(unsigned long)-1));
76 return ret;
77}
78
46#endif /* _ASM_WORD_AT_A_TIME_H */ 79#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 764b66a4cf89..c090af10ac7d 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -188,11 +188,18 @@ struct x86_msi_ops {
188 void (*restore_msi_irqs)(struct pci_dev *dev, int irq); 188 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
189}; 189};
190 190
191struct x86_io_apic_ops {
192 void (*init) (void);
193 unsigned int (*read) (unsigned int apic, unsigned int reg);
194 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
195 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
196};
197
191extern struct x86_init_ops x86_init; 198extern struct x86_init_ops x86_init;
192extern struct x86_cpuinit_ops x86_cpuinit; 199extern struct x86_cpuinit_ops x86_cpuinit;
193extern struct x86_platform_ops x86_platform; 200extern struct x86_platform_ops x86_platform;
194extern struct x86_msi_ops x86_msi; 201extern struct x86_msi_ops x86_msi;
195 202extern struct x86_io_apic_ops x86_io_apic_ops;
196extern void x86_init_noop(void); 203extern void x86_init_noop(void);
197extern void x86_init_uint_noop(unsigned int unused); 204extern void x86_init_uint_noop(unsigned int unused);
198 205
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index c6ce2452f10c..8a1b6f9b594a 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -80,10 +80,7 @@ static inline int xsave_user(struct xsave_struct __user *buf)
80 "3: movl $-1,%[err]\n" 80 "3: movl $-1,%[err]\n"
81 " jmp 2b\n" 81 " jmp 2b\n"
82 ".previous\n" 82 ".previous\n"
83 ".section __ex_table,\"a\"\n" 83 _ASM_EXTABLE(1b,3b)
84 _ASM_ALIGN "\n"
85 _ASM_PTR "1b,3b\n"
86 ".previous"
87 : [err] "=r" (err) 84 : [err] "=r" (err)
88 : "D" (buf), "a" (-1), "d" (-1), "0" (0) 85 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
89 : "memory"); 86 : "memory");
@@ -106,10 +103,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
106 "3: movl $-1,%[err]\n" 103 "3: movl $-1,%[err]\n"
107 " jmp 2b\n" 104 " jmp 2b\n"
108 ".previous\n" 105 ".previous\n"
109 ".section __ex_table,\"a\"\n" 106 _ASM_EXTABLE(1b,3b)
110 _ASM_ALIGN "\n"
111 _ASM_PTR "1b,3b\n"
112 ".previous"
113 : [err] "=r" (err) 107 : [err] "=r" (err)
114 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) 108 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
115 : "memory"); /* memory required? */ 109 : "memory"); /* memory required? */