Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Makefile.um                    |    3
-rw-r--r--  arch/x86/boot/compressed/head_32.S      |   14
-rw-r--r--  arch/x86/boot/compressed/head_64.S      |   22
-rw-r--r--  arch/x86/boot/tools/build.c             |   15
-rw-r--r--  arch/x86/ia32/ia32_aout.c               |   32
-rw-r--r--  arch/x86/include/asm/cmpxchg.h          |    4
-rw-r--r--  arch/x86/include/asm/posix_types.h      |    6
-rw-r--r--  arch/x86/include/asm/sigcontext.h       |    2
-rw-r--r--  arch/x86/include/asm/siginfo.h          |    8
-rw-r--r--  arch/x86/include/asm/uaccess.h          |    2
-rw-r--r--  arch/x86/include/asm/uaccess_32.h       |    5
-rw-r--r--  arch/x86/include/asm/uaccess_64.h       |    4
-rw-r--r--  arch/x86/include/asm/unistd.h           |    6
-rw-r--r--  arch/x86/include/asm/x86_init.h         |    1
-rw-r--r--  arch/x86/kernel/acpi/sleep.c            |    4
-rw-r--r--  arch/x86/kernel/acpi/sleep.h            |    4
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S        |    4
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S        |    4
-rw-r--r--  arch/x86/kernel/apic/apic.c             |   34
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c    |    7
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c      |    6
-rw-r--r--  arch/x86/kernel/cpu/amd.c               |   11
-rw-r--r--  arch/x86/kernel/cpu/common.c            |    9
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c   |    8
-rw-r--r--  arch/x86/kernel/i387.c                  |    1
-rw-r--r--  arch/x86/kernel/microcode_amd.c         |   12
-rw-r--r--  arch/x86/kernel/microcode_core.c        |   10
-rw-r--r--  arch/x86/kernel/vsyscall_64.c           |    6
-rw-r--r--  arch/x86/kernel/x86_init.c              |    1
-rw-r--r--  arch/x86/kvm/pmu.c                      |   18
-rw-r--r--  arch/x86/kvm/vmx.c                      |    5
-rw-r--r--  arch/x86/kvm/x86.c                      |    8
-rw-r--r--  arch/x86/lib/insn.c                     |   53
-rw-r--r--  arch/x86/lib/usercopy.c                 |  103
-rw-r--r--  arch/x86/lib/usercopy_32.c              |   87
-rw-r--r--  arch/x86/lib/usercopy_64.c              |   49
-rw-r--r--  arch/x86/platform/mrst/mrst.c           |    4
-rw-r--r--  arch/x86/um/asm/barrier.h               |   75
-rw-r--r--  arch/x86/um/asm/system.h                |  135
-rw-r--r--  arch/x86/xen/enlighten.c                |    4
-rw-r--r--  arch/x86/xen/smp.c                      |   15
-rw-r--r--  arch/x86/xen/xen-asm.S                  |    2
42 files changed, 382 insertions(+), 421 deletions(-)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406abeefd..36b62bc52638 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 
 export LDFLAGS
 
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index a0559930a180..c85e3ac99bba 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,6 +33,9 @@
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
+	jmp	preferred_addr
+
+	.balign	0x10
 	/*
 	 * We don't need the return address, so set up the stack so
 	 * efi_main() can find its arugments.
@@ -41,12 +44,17 @@ ENTRY(startup_32)
 
 	call	efi_main
 	cmpl	$0, %eax
-	je	preferred_addr
 	movl	%eax, %esi
-	call	1f
+	jne	2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp	1b
+2:
+	call	3f
+3:
 	popl	%eax
-	subl	$1b, %eax
+	subl	$3b, %eax
 	subl	BP_pref_address(%esi), %eax
 	add	BP_code32_start(%esi), %eax
 	leal	preferred_addr(%eax), %eax
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 558d76ce23bc..87e03a13d8e3 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -200,18 +200,28 @@ ENTRY(startup_64)
 	 * entire text+data+bss and hopefully all of memory.
 	 */
 #ifdef CONFIG_EFI_STUB
-	pushq %rsi
+	/*
+	 * The entry point for the PE/COFF executable is 0x210, so only
+	 * legacy boot loaders will execute this jmp.
+	 */
+	jmp	preferred_addr
+
+	.org 0x210
 	mov %rcx, %rdi
 	mov %rdx, %rsi
 	call	efi_main
-	popq %rsi
-	cmpq $0,%rax
-	je preferred_addr
 	movq %rax,%rsi
-	call 1f
+	cmpq $0,%rax
+	jne 2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp 1b
+2:
+	call 3f
+3:
 	popq %rax
-	subq $1b, %rax
+	subq $3b, %rax
 	subq BP_pref_address(%rsi), %rax
 	add BP_code32_start(%esi), %eax
 	leaq preferred_addr(%rax), %rax
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ed549767a231..24443a332083 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -205,8 +205,13 @@ int main(int argc, char ** argv)
 	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
 
 #ifdef CONFIG_X86_32
-	/* Address of entry point */
-	put_unaligned_le32(i, &buf[pe_header + 0x28]);
+	/*
+	 * Address of entry point.
+	 *
+	 * The EFI stub entry point is +16 bytes from the start of
+	 * the .text section.
+	 */
+	put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
@@ -217,9 +222,11 @@ int main(int argc, char ** argv)
 	/*
 	 * Address of entry point. startup_32 is at the beginning and
 	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after.
+	 * after. The EFI stub entry point is 16 bytes after that, as
+	 * the first instruction allows legacy loaders to jump over
+	 * the EFI stub initialisation
 	 */
-	put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
+	put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
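
The entry-point arithmetic above is easy to mis-read, so here is a standalone sketch of the layout it encodes (hypothetical helper, not part of build.c): startup_32 sits at the start of .text (file offset i), startup_64 is pinned 512 bytes after it, and the new EFI stub entry follows 16 bytes further, giving i + 16 for the 32-bit image and i + 512 + 16 = i + 528 for the 64-bit one.

    #include <stdint.h>

    /* Hypothetical helper mirroring the PE AddressOfEntryPoint values
     * patched above; text_start plays the role of 'i' in build.c. */
    static uint32_t efi_stub_entry(uint32_t text_start, int is_64bit)
    {
            /* 64-bit: startup_64 at +512, EFI stub 16 bytes after it.
             * 32-bit: EFI stub at +16, reached only by EFI firmware since
             * legacy loaders take the jmp added to startup_32. */
            return is_64bit ? text_start + 512 + 16 : text_start + 16;
    }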
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index d511d951a052..4824fb45560f 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -119,9 +119,7 @@ static void set_brk(unsigned long start, unsigned long end)
 	end = PAGE_ALIGN(end);
 	if (end <= start)
 		return;
-	down_write(&current->mm->mmap_sem);
-	do_brk(start, end - start);
-	up_write(&current->mm->mmap_sem);
+	vm_brk(start, end - start);
 }
 
 #ifdef CORE_DUMP
@@ -332,9 +330,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	pos = 32;
 	map_size = ex.a_text+ex.a_data;
 
-	down_write(&current->mm->mmap_sem);
-	error = do_brk(text_addr & PAGE_MASK, map_size);
-	up_write(&current->mm->mmap_sem);
+	error = vm_brk(text_addr & PAGE_MASK, map_size);
 
 	if (error != (text_addr & PAGE_MASK)) {
 		send_sig(SIGKILL, current, 0);
@@ -373,9 +369,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
 		loff_t pos = fd_offset;
 
-		down_write(&current->mm->mmap_sem);
-		do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
-		up_write(&current->mm->mmap_sem);
+		vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
 		bprm->file->f_op->read(bprm->file,
 				(char __user *)N_TXTADDR(ex),
 				ex.a_text+ex.a_data, &pos);
@@ -385,26 +379,22 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		goto beyond_if;
 	}
 
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+	error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
 			PROT_READ | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
 			MAP_EXECUTABLE | MAP_32BIT,
 			fd_offset);
-	up_write(&current->mm->mmap_sem);
 
 	if (error != N_TXTADDR(ex)) {
 		send_sig(SIGKILL, current, 0);
 		return error;
 	}
 
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+	error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
 			PROT_READ | PROT_WRITE | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
 			MAP_EXECUTABLE | MAP_32BIT,
 			fd_offset + ex.a_text);
-	up_write(&current->mm->mmap_sem);
 	if (error != N_DATADDR(ex)) {
 		send_sig(SIGKILL, current, 0);
 		return error;
@@ -476,9 +466,7 @@ static int load_aout_library(struct file *file)
 		error_time = jiffies;
 	}
 #endif
-	down_write(&current->mm->mmap_sem);
-	do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
-	up_write(&current->mm->mmap_sem);
+	vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
 
 	file->f_op->read(file, (char __user *)start_addr,
 			 ex.a_text + ex.a_data, &pos);
@@ -490,12 +478,10 @@ static int load_aout_library(struct file *file)
 		goto out;
 	}
 	/* Now use mmap to map the library into memory. */
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+	error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
 			PROT_READ | PROT_WRITE | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
 			N_TXTOFF(ex));
-	up_write(&current->mm->mmap_sem);
 	retval = error;
 	if (error != start_addr)
 		goto out;
@@ -503,9 +489,7 @@ static int load_aout_library(struct file *file)
 	len = PAGE_ALIGN(ex.a_text + ex.a_data);
 	bss = ex.a_text + ex.a_data + ex.a_bss;
 	if (bss > len) {
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(start_addr + len, bss - len);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(start_addr + len, bss - len);
 		retval = error;
 		if (error != start_addr + len)
 			goto out;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b733262909..99480e55973d 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -43,7 +43,7 @@ extern void __add_wrong_size(void)
 	switch (sizeof(*(ptr))) {					\
 	case __X86_CASE_B:						\
 		asm volatile (lock #op "b %b0, %1\n"			\
-			      : "+r" (__ret), "+m" (*(ptr))		\
+			      : "+q" (__ret), "+m" (*(ptr))		\
 			      : : "memory", "cc");			\
 		break;							\
 	case __X86_CASE_W:						\
@@ -173,7 +173,7 @@ extern void __add_wrong_size(void)
 	switch (sizeof(*(ptr))) {					\
 	case __X86_CASE_B:						\
 		asm volatile (lock "addb %b1, %0\n"			\
-			      : "+m" (*(ptr)) : "ri" (inc)		\
+			      : "+m" (*(ptr)) : "qi" (inc)		\
 			      : "memory", "cc");			\
 		break;							\
 	case __X86_CASE_W:						\
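
The constraint change above only matters for the byte cases: the %b0/%b1 operand needs a register that has an 8-bit subregister, and on 32-bit x86 only %eax-%edx qualify. Plain "r" can hand the asm %esi or %edi, whose byte forms (%sil, %dil) are not encodable outside 64-bit mode, so the build breaks; "q" restricts the allocator to the a/b/c/d class. A minimal standalone illustration (hypothetical function, not the kernel macro):

    static inline unsigned char xchg8(unsigned char *p, unsigned char val)
    {
            /* "q" = a/b/c/d register class, required by the %b0 byte operand */
            asm volatile("xchgb %b0, %1"
                         : "+q" (val), "+m" (*p)
                         : : "memory");
            return val;
    }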
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 3427b7798dbc..7ef7c3020e5c 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,9 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# elif defined(__LP64__)
-#  include "posix_types_64.h"
-# else
+# elif defined(__ILP32__)
 #  include "posix_types_x32.h"
+# else
+#  include "posix_types_64.h"
 # endif
 #endif
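
This header (and sigcontext.h, siginfo.h, and unistd.h below) switches from testing __LP64__ to testing __ILP32__, because the x32 ABI is the odd one out: it runs on the 64-bit ISA but keeps 32-bit longs and pointers. A user-space probe of the compiler-defined macros involved (illustrative only; the __i386__ test comes first, exactly as in the header, so only 64-bit ISA builds reach the __ILP32__ check):

    #include <stdio.h>

    int main(void)
    {
    #if defined(__i386__)
            puts("ia32 ABI (posix_types_32.h)");
    #elif defined(__ILP32__)
            puts("x32 ABI (posix_types_x32.h)");  /* gcc -mx32 */
    #else
            puts("LP64 ABI (posix_types_64.h)");  /* gcc -m64 */
    #endif
            return 0;
    }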
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 4a085383af27..5ca71c065eef 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -257,7 +257,7 @@ struct sigcontext {
 	__u64 oldmask;
 	__u64 cr2;
 	struct _fpstate __user *fpstate;	/* zero when no FPU context */
-#ifndef __LP64__
+#ifdef __ILP32__
 	__u32 __fpstate_pad;
 #endif
 	__u64 reserved1[8];
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h
index fc1aa5535646..34c47b3341c0 100644
--- a/arch/x86/include/asm/siginfo.h
+++ b/arch/x86/include/asm/siginfo.h
@@ -2,7 +2,13 @@
 #define _ASM_X86_SIGINFO_H
 
 #ifdef __x86_64__
-# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# ifdef __ILP32__ /* x32 */
+typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
+#  define __ARCH_SI_CLOCK_T		__kernel_si_clock_t
+#  define __ARCH_SI_ATTRIBUTES		__attribute__((aligned(8)))
+# else /* x86-64 */
+#  define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# endif
 #endif
 
 #include <asm-generic/siginfo.h>
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54d9360..e0544597cfe7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; };
 
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc602..8084bc73b18c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	return n;
 }
 
-long __must_check strncpy_from_user(char *dst, const char __user *src,
-				    long count);
-long __must_check __strncpy_from_user(char *dst,
-				      const char __user *src, long count);
-
 /**
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971ad..fcd4b6f3ef02 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 	}
 }
 
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
 __must_check long strlen_user(const char __user *str);
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 37cdc9d99bb1..4437001d8e3d 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -63,10 +63,10 @@
 #else
 # ifdef __i386__
 #  include <asm/unistd_32.h>
-# elif defined(__LP64__)
-#  include <asm/unistd_64.h>
-# else
+# elif defined(__ILP32__)
 #  include <asm/unistd_x32.h>
+# else
+#  include <asm/unistd_64.h>
 # endif
 #endif
 
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baaca8defec8..764b66a4cf89 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi;
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
-extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
 
 #endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 103b6ab368d3..146a49c763a4 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
 static char temp_stack[4096];
 #endif
 
+asmlinkage void acpi_enter_s3(void)
+{
+	acpi_enter_sleep_state(3, wake_sleep_flags);
+}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 416d4be13fef..d68677a2a010 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -3,12 +3,16 @@
  */
 
 #include <asm/trampoline.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
 
 extern int wakeup_pmode_return;
 
+extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
+
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
 
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..72610839f03b 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@ restore_registers:
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	pushl	$3
-	call	acpi_enter_sleep_state
-	addl	$4, %esp
+	call	acpi_enter_s3
 
 #	In case of S3 failure, we'll emerge here.  Jump
 #	to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..014d1d28c397 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	movl	$3, %edi
-	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	acpi_enter_s3
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 11544d8f1e97..edc24480469f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (l & MSR_IA32_APICBASE_ENABLE)
-		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (l & MSR_IA32_APICBASE_ENABLE)
+			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	}
 
 	pr_info("Found and enabled local APIC!\n");
 	return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
 	 * MSR. This can only be done in software for Intel P6 or later
 	 * and AMD K7 (Model > 1) or later.
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-		enabled_via_apicbase = 1;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+			enabled_via_apicbase = 1;
+		}
 	}
 	return apic_verify();
 }
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
 	 * FIXME! This will be wrong if we ever support suspend on
 	 * SMP! We'll need to do this as part of the CPU restore!
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	l &= ~MSR_IA32_APICBASE_BASE;
-	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-	wrmsr(MSR_IA32_APICBASE, l, h);
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		l &= ~MSR_IA32_APICBASE_BASE;
+		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+		wrmsr(MSR_IA32_APICBASE, l, h);
+	}
 	}
 
 	maxlvt = lapic_get_maxlvt();
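
All three hunks add the same guard: the IA32_APICBASE MSR only exists on Intel family 6 and later (and AMD K7 and later), and executing rdmsr on an older part raises a #GP fault. A condensed sketch of the pattern, assuming the kernel's rdmsr()/boot_cpu_data helpers (hypothetical function, not from this patch):

    static unsigned long lapic_addr_from_msr(unsigned long fallback)
    {
            unsigned int l, h;

            if (boot_cpu_data.x86 < 6)
                    return fallback;        /* no APICBASE MSR on this family */

            rdmsr(MSR_IA32_APICBASE, l, h);
            if (l & MSR_IA32_APICBASE_ENABLE)
                    return l & MSR_IA32_APICBASE_BASE;
            return fallback;
    }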
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 899803e03214..23e75422e013 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -207,8 +207,11 @@ static void __init map_csrs(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	c->phys_proc_id = node;
-	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+
+	if (c->phys_proc_id != node) {
+		c->phys_proc_id = node;
+		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	}
 }
 
 static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db45e3a..991e315f4227 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (x2apic_phys)
 		return x2apic_enabled();
+	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+		x2apic_enabled()) {
+		printk(KERN_DEBUG "System requires x2apic physical mode\n");
+		return 1;
+	}
 	else
 		return 0;
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0a44b90602b0..1c67ca100e4c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,7 +26,8 @@
  * contact AMD for precise details and a CPU swap.
  *
  * See	http://www.multimania.com/poulot/k6bug.html
- *	http://www.amd.com/K6/k6docs/revgd.html
+ * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *	(Publication # 21266  Issue Date: August 1998)
  *
  * The following test is erm.. interesting. AMD neglected to up
  * the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
94 "system stability may be impaired when more than 32 MB are used.\n"); 95 "system stability may be impaired when more than 32 MB are used.\n");
95 else 96 else
96 printk(KERN_CONT "probably OK (after B9730xxxx).\n"); 97 printk(KERN_CONT "probably OK (after B9730xxxx).\n");
97 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
98 } 98 }
99 99
100 /* K6 with old style WHCR */ 100 /* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = per_cpu(cpu_llc_id, cpu);
 
 	/*
-	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
-	 * so invoke platform-specific handler
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
 	 */
-	if (c->phys_proc_id != node)
+	if (x86_cpuinit.fixup_cpu_id)
 		x86_cpuinit.fixup_cpu_id(c, node);
 
 	if (!node_online(node)) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e258362a3d..cf79302198a6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1163,15 +1163,6 @@ static void dbg_restore_debug_regs(void)
 #endif	/* ! CONFIG_KGDB */
 
 /*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-
-/*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed98a64..b8f3653dddbc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
 	/* check if @slot is already used or the index is already disabled */
 	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
-		return -EINVAL;
+		return -EEXIST;
 
 	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
 	if (index == amd_get_l3_disable_slot(nb, !slot))
-		return -EINVAL;
+		return -EEXIST;
 
 	amd_l3_disable_index(nb, cpu, slot, index);
 
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
-			printk(KERN_WARNING "L3 disable slot %d in use!\n",
-					 slot);
+			pr_warning("L3 slot %d in use/index already disabled!\n",
+				   slot);
 		return err;
 	}
 	return count;
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 7734bcbb5a3a..2d6e6498c176 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
+		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 73465aab28f8..8a2ce8fd41c0 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-		pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
-		return -1;
-	}
-
 	csig->rev = c->microcode;
 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
 
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
+		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	patch = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!patch)
 		return NULL;
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 87a0f8688301..c9bda6d6035c 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
 	if (err)
 		return err;
 
-	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	if (microcode_init_cpu(cpu) == UCODE_ERROR)
 		return -EINVAL;
-	}
 
 	return err;
 }
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
 		microcode_ops = init_intel_microcode();
 	else if (c->x86_vendor == X86_VENDOR_AMD)
 		microcode_ops = init_amd_microcode();
-
-	if (!microcode_ops) {
+	else
 		pr_err("no support for this CPU vendor\n");
+
+	if (!microcode_ops)
 		return -ENODEV;
-	}
 
 	microcode_pdev = platform_device_register_simple("microcode", -1,
 						  NULL, 0);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index f386dc49f988..7515cf0e1805 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here.  For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -247,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	}
 
253 253
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index e9f265fd79ae..9cf71d0b2d37 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
-	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 173df38dbda5..2e88438ffd83 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -459,17 +459,17 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
 
 	if (pmu->version == 1) {
-		pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
-		return;
+		pmu->nr_arch_fixed_counters = 0;
+	} else {
+		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+				X86_PMC_MAX_FIXED);
+		pmu->counter_bitmask[KVM_PMC_FIXED] =
+			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
 	}
 
-	pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
-			X86_PMC_MAX_FIXED);
-	pmu->counter_bitmask[KVM_PMC_FIXED] =
-		((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
-	pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
-			| (((1ull << pmu->nr_arch_fixed_counters) - 1)
-			<< X86_PMC_IDX_FIXED));
+	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+		(((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
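
A worked instance of the new global_ctrl computation (standalone, illustrative counter counts, not KVM code): with 4 general-purpose and 3 fixed counters, the enable mask covers bits 0-3 plus bits 32-34 of IA32_PERF_GLOBAL_CTRL, and global_ctrl_mask is simply its complement.

    #include <stdio.h>
    #include <stdint.h>

    #define X86_PMC_IDX_FIXED 32

    int main(void)
    {
            int nr_gp = 4, nr_fixed = 3;    /* illustrative values */
            uint64_t global_ctrl = ((1u << nr_gp) - 1) |
                    (((1ull << nr_fixed) - 1) << X86_PMC_IDX_FIXED);

            printf("global_ctrl      = %#llx\n", (unsigned long long)global_ctrl);
            printf("global_ctrl_mask = %#llx\n", (unsigned long long)~global_ctrl);
            return 0;       /* prints 0x70000000f and 0xfffffff8fffffff0 */
    }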
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad85adfef843..4ff0ab9bc3c8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2210,9 +2210,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	msr = find_msr_entry(vmx, msr_index);
 	if (msr) {
 		msr->data = data;
-		if (msr - vmx->guest_msrs < vmx->save_nmsrs)
+		if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+			preempt_disable();
 			kvm_set_shared_msr(msr->index, msr->data,
 					   msr->mask);
+			preempt_enable();
+		}
 		break;
 	}
 	ret = kvm_set_msr_common(vcpu, msr_index, data);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4044ce0bf7c1..91a5e989abcf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6336,13 +6336,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (npages && !old.rmap) {
 		unsigned long userspace_addr;
 
-		down_write(&current->mm->mmap_sem);
-		userspace_addr = do_mmap(NULL, 0,
+		userspace_addr = vm_mmap(NULL, 0,
 						 npages * PAGE_SIZE,
 						 PROT_READ | PROT_WRITE,
 						 map_flags,
 						 0);
-		up_write(&current->mm->mmap_sem);
 
 		if (IS_ERR((void *)userspace_addr))
 			return PTR_ERR((void *)userspace_addr);
@@ -6366,10 +6364,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
 		int ret;
 
-		down_write(&current->mm->mmap_sem);
-		ret = do_munmap(current->mm, old.userspace_addr,
+		ret = vm_munmap(old.userspace_addr,
 				old.npages * PAGE_SIZE);
-		up_write(&current->mm->mmap_sem);
 		if (ret < 0)
 			printk(KERN_WARNING
 			       "kvm_vm_ioctl_set_memory_region: "
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 25feb1ae71c5..b1e6c4b2e8eb 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -379,8 +379,8 @@ err_out:
 	return;
 }
 
-/* Decode moffset16/32/64 */
-static void __get_moffset(struct insn *insn)
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
 {
 	switch (insn->addr_bytes) {
 	case 2:
@@ -397,15 +397,19 @@ static void __get_moffset(struct insn *insn)
 		insn->moffset2.value = get_next(int, insn);
 		insn->moffset2.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->moffset1.got = insn->moffset2.got = 1;
 
+	return 1;
+
 err_out:
-	return;
+	return 0;
 }
 
-/* Decode imm v32(Iz) */
-static void __get_immv32(struct insn *insn)
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -417,14 +421,18 @@ static void __get_immv32(struct insn *insn)
 		insn->immediate.value = get_next(int, insn);
 		insn->immediate.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 
+	return 1;
+
 err_out:
-	return;
+	return 0;
 }
 
-/* Decode imm v64(Iv/Ov) */
-static void __get_immv(struct insn *insn)
+/* Decode imm v64(Iv/Ov), Return 0 if failed */
+static int __get_immv(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -441,15 +449,18 @@ static void __get_immv(struct insn *insn)
 		insn->immediate2.value = get_next(int, insn);
 		insn->immediate2.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->immediate1.got = insn->immediate2.got = 1;
 
+	return 1;
 err_out:
-	return;
+	return 0;
 }
 
 /* Decode ptr16:16/32(Ap) */
-static void __get_immptr(struct insn *insn)
+static int __get_immptr(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -462,14 +473,17 @@ static void __get_immptr(struct insn *insn)
 		break;
 	case 8:
 		/* ptr16:64 is not exist (no segment) */
-		return;
+		return 0;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->immediate2.value = get_next(unsigned short, insn);
 	insn->immediate2.nbytes = 2;
 	insn->immediate1.got = insn->immediate2.got = 1;
 
+	return 1;
 err_out:
-	return;
+	return 0;
 }
 
 /**
@@ -489,7 +503,8 @@ void insn_get_immediate(struct insn *insn)
 	insn_get_displacement(insn);
 
 	if (inat_has_moffset(insn->attr)) {
-		__get_moffset(insn);
+		if (!__get_moffset(insn))
+			goto err_out;
 		goto done;
 	}
 
@@ -517,16 +532,20 @@ void insn_get_immediate(struct insn *insn)
 		insn->immediate2.nbytes = 4;
 		break;
 	case INAT_IMM_PTR:
-		__get_immptr(insn);
+		if (!__get_immptr(insn))
+			goto err_out;
 		break;
 	case INAT_IMM_VWORD32:
-		__get_immv32(insn);
+		if (!__get_immv32(insn))
+			goto err_out;
 		break;
 	case INAT_IMM_VWORD:
-		__get_immv(insn);
+		if (!__get_immv(insn))
+			goto err_out;
 		break;
 	default:
-		break;
+		/* Here, insn must have an immediate, but failed */
+		goto err_out;
 	}
 	if (inat_has_second_immediate(insn->attr)) {
 		insn->immediate2.value = get_next(char, insn);
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 97be9cb54483..d6ae30bbd7bb 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
+#include <asm/word-at-a-time.h>
+
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
  */
@@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+static inline unsigned long count_bytes(unsigned long mask)
+{
+	mask = (mask - 1) & ~mask;
+	mask >>= 7;
+	return count_masked_bytes(mask);
+}
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+{
+	long res = 0;
+
+	/*
+	 * Truncate 'max' to the user-specified limit, so that
+	 * we only have one limit we need to check in the loop
+	 */
+	if (max > count)
+		max = count;
+
+	while (max >= sizeof(unsigned long)) {
+		unsigned long c;
+
+		/* Fall back to byte-at-a-time if we get a page fault */
+		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+			break;
+		/* This can write a few bytes past the NUL character, but that's ok */
+		*(unsigned long *)(dst+res) = c;
+		c = has_zero(c);
+		if (c)
+			return res + count_bytes(c);
+		res += sizeof(unsigned long);
+		max -= sizeof(unsigned long);
+	}
+
+	while (max) {
+		char c;
+
+		if (unlikely(__get_user(c,src+res)))
+			return -EFAULT;
+		dst[res] = c;
+		if (!c)
+			return res;
+		res++;
+		max--;
+	}
+
+	/*
+	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
+	 * too? If so, that's ok - we got as much as the user asked for.
+	 */
+	if (res >= count)
+		return res;
+
+	/*
+	 * Nope: we hit the address space limit, and we still had more
+	 * characters the caller would have wanted. That's an EFAULT.
+	 */
+	return -EFAULT;
+}
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	unsigned long max_addr, src_addr;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	max_addr = current_thread_info()->addr_limit.seg;
+	src_addr = (unsigned long)src;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strncpy_from_user(dst, src, count, max);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
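
The fast path above leans on two bit tricks from <asm/word-at-a-time.h>: has_zero() lights bit 7 of every zero byte in a word, and count_bytes() turns that mask into the number of bytes preceding the first NUL. A standalone 64-bit little-endian demo (has_zero() and the multiply constant follow that header; main() is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint64_t has_zero(uint64_t a)
    {
            return (a - 0x0101010101010101ull) & ~a & 0x8080808080808080ull;
    }

    static unsigned long count_bytes(uint64_t mask)
    {
            mask = (mask - 1) & ~mask;  /* ones below the lowest set 0x80 bit */
            mask >>= 7;
            return (mask * 0x0001020304050608ull) >> 56;
    }

    int main(void)
    {
            uint64_t c;

            memcpy(&c, "abc\0defg", 8);                 /* NUL is byte 3 */
            printf("%lu\n", count_bytes(has_zero(c)));  /* prints 3 */
            return 0;
    }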
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index d9b094ca7aaa..ef2a6a5d78e3 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst, src, count, res)			\
-do {									\
-	int __d0, __d1, __d2;						\
-	might_fault();							\
-	__asm__ __volatile__(						\
-		"	testl %1,%1\n"					\
-		"	jz 2f\n"					\
-		"0:	lodsb\n"					\
-		"	stosb\n"					\
-		"	testb %%al,%%al\n"				\
-		"	jz 1f\n"					\
-		"	decl %1\n"					\
-		"	jnz 0b\n"					\
-		"1:	subl %1,%0\n"					\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"3:	movl %5,%0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(0b,3b)					\
-		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	\
-		  "=&D" (__d2)						\
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						\
-} while (0)
-
-/**
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b7c2849ffb66..0d0326f388c0 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,6 @@
 #include <asm/uaccess.h>
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res)			\
-do {									\
-	long __d0, __d1, __d2;						\
-	might_fault();							\
-	__asm__ __volatile__(						\
-		"	testq %1,%1\n"					\
-		"	jz 2f\n"					\
-		"0:	lodsb\n"					\
-		"	stosb\n"					\
-		"	testb %%al,%%al\n"				\
-		"	jz 1f\n"					\
-		"	decq %1\n"					\
-		"	jnz 0b\n"					\
-		"1:	subq %1,%0\n"					\
-		"2:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"3:	movq %5,%0\n"					\
-		"	jmp 2b\n"					\
-		".previous\n"						\
-		_ASM_EXTABLE(0b,3b)					\
-		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	\
-		  "=&D" (__d2)						\
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						\
-} while (0)
-
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e0a37233c0af..e31bcd8f2eee 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -805,7 +805,7 @@ void intel_scu_devices_create(void)
 	} else
 		i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
 	}
-	intel_scu_notifier_post(SCU_AVAILABLE, 0L);
+	intel_scu_notifier_post(SCU_AVAILABLE, NULL);
 }
 EXPORT_SYMBOL_GPL(intel_scu_devices_create);
 
@@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void)
 {
 	int i;
 
-	intel_scu_notifier_post(SCU_DOWN, 0L);
+	intel_scu_notifier_post(SCU_DOWN, NULL);
 
 	for (i = 0; i < ipc_next_dev; i++)
 		platform_device_del(ipc_devs[i]);
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 000000000000..7d01b8c56c00
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
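
For context, the pairing these smp_* macros exist for, as an illustrative message-passing sketch (not code from this patch): the producer's smp_wmb() orders the payload store before the flag store, and the consumer's smp_rmb() orders the flag load before the payload load.

    static int payload;
    static int ready;

    static void producer(void)
    {
            payload = 42;
            smp_wmb();              /* publish payload before setting ready */
            ready = 1;
    }

    static int consumer(void)
    {
            if (!ready)
                    return -1;
            smp_rmb();              /* see ready before reading payload */
            return payload;
    }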
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9b7598..000000000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4f51bebac02c..a8f8844b8d32 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -261,7 +261,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 
 static bool __init xen_check_mwait(void)
 {
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
+	!defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
 	struct xen_platform_op op = {
 		.cmd = XENPF_set_processor_pminfo,
 		.u.set_pminfo.id = -1,
@@ -349,7 +350,6 @@ static void __init xen_init_cpuid_mask(void)
 	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
 	if ((cx & xsave_mask) != xsave_mask)
 		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
-
 	if (xen_check_mwait())
 		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 5fac6919b957..0503c0c493a9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void)
 static void __init xen_filter_cpu_maps(void)
 {
 	int i, rc;
+	unsigned int subtract = 0;
 
 	if (!xen_initial_domain())
 		return;
@@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void)
 	} else {
 		set_cpu_possible(i, false);
 		set_cpu_present(i, false);
+		subtract++;
 	}
 	}
+#ifdef CONFIG_HOTPLUG_CPU
+	/* This is akin to using 'nr_cpus' on the Linux command line.
+	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+	 * have up to X, while nr_cpu_ids is greater than X. This
+	 * normally is not a problem, except when CPU hotplugging
+	 * is involved and then there might be more than X CPUs
+	 * in the guest - which will not work as there is no
+	 * hypercall to expand the max number of VCPUs an already
+	 * running guest has. So cap it up to X. */
+	if (subtract)
+		nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
 }
 
 static void __init xen_smp_prepare_boot_cpu(void)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 79d7362ad6d1..3e45aa000718 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
+	jnz 1f
 2:	call check_events
 1:
 ENDPATCH(xen_restore_fl_direct)